[med-svn] [r-cran-bit64] 04/08: New upstream version 0.9-7

Andreas Tille tille at debian.org
Wed Nov 29 14:37:54 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-bit64.

commit d3a125e59be03c6d46e74d8c4dbfcc6e4cac0491
Author: Andreas Tille <tille at debian.org>
Date:   Wed Nov 29 15:35:32 2017 +0100

    New upstream version 0.9-7
---
 DESCRIPTION                       |   28 +
 MD5                               |   66 +
 NAMESPACE                         |  552 +++++++
 NEWS                              |  143 ++
 R/cache.R                         |  467 ++++++
 R/hash64.R                        |  410 +++++
 R/highlevel64.R                   | 3048 +++++++++++++++++++++++++++++++++++++
 R/integer64.R                     | 2443 +++++++++++++++++++++++++++++
 R/patch64.R                       |  166 ++
 R/sort64.R                        |  690 +++++++++
 R/sortuse64.R                     |  582 +++++++
 R/zzz.R                           |   37 +
 data/benchmark64.data.rda         |  Bin 0 -> 788 bytes
 data/optimizer64.data.rda         |  Bin 0 -> 2162 bytes
 debian/README.source              |   15 -
 debian/changelog                  |    5 -
 debian/compat                     |    1 -
 debian/control                    |   32 -
 debian/copyright                  |   16 -
 debian/rules                      |    8 -
 debian/source/format              |    1 -
 debian/watch                      |    2 -
 exec/make_rd.pl                   |   33 +
 exec/prebuild.sh                  |   17 +
 inst/ANNOUNCEMENT-0.8.txt         |   26 +
 inst/ANNOUNCEMENT-0.9-Details.txt |    1 +
 inst/ANNOUNCEMENT-0.9.txt         |   11 +
 inst/README_devel.txt             |   10 +
 man/as.character.integer64.rd     |   47 +
 man/as.data.frame.integer64.rd    |   37 +
 man/as.integer64.character.rd     |   48 +
 man/benchmark64.data.rd           |   39 +
 man/benchmark64.rd                |  133 ++
 man/bit64-package.rd              |  892 +++++++++++
 man/bit64S3.rd                    |  114 ++
 man/c.integer64.rd                |   39 +
 man/cache.rd                      |   92 ++
 man/cumsum.integer64.rd           |   42 +
 man/duplicated.integer64.rd       |   43 +
 man/extract.replace.integer64.rd  |   43 +
 man/format.integer64.rd           |   72 +
 man/hashcache.rd                  |   58 +
 man/hashmap.rd                    |  159 ++
 man/identical.integer64.rd        |   37 +
 man/is.sorted.integer64.rd        |   64 +
 man/keypos.rd                     |   42 +
 man/match.integer64.rd            |  125 ++
 man/optimizer64.data.rd           |  103 ++
 man/plusclass.rd                  |   33 +
 man/prank.rd                      |   39 +
 man/qtile.rd                      |   79 +
 man/ramsort.integer64.rd          |  117 ++
 man/rank.integer64.rd             |   39 +
 man/rep.integer64.rd              |   31 +
 man/seq.integer64.rd              |   43 +
 man/sort.integer64.rd             |   63 +
 man/sortnut.rd                    |  159 ++
 man/sum.integer64.rd              |   53 +
 man/table.integer64.rd            |  120 ++
 man/tiepos.rd                     |   43 +
 man/unipos.rd                     |   57 +
 man/unique.integer64.rd           |   58 +
 man/xor.integer64.rd              |   65 +
 src/Makevars                      |    1 +
 src/Makevars.win                  |    1 +
 src/bsearch.c                     |  267 ++++
 src/bsearch.h                     | 1245 +++++++++++++++
 src/cache.c                       |   55 +
 src/hash64.c                      |  541 +++++++
 src/init.c                        |  219 +++
 src/integer64.c                   |  958 ++++++++++++
 src/integer64.h                   |  334 ++++
 src/sort64.c                      | 2176 ++++++++++++++++++++++++++
 src/sort64.h                      |  545 +++++++
 src/sortuse64.c                   | 1302 ++++++++++++++++
 75 files changed, 19602 insertions(+), 80 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
new file mode 100644
index 0000000..b29c80f
--- /dev/null
+++ b/DESCRIPTION
@@ -0,0 +1,28 @@
+Package: bit64
+Type: Package
+Title: A S3 Class for Vectors of 64bit Integers
+Version: 0.9-7
+Date: 2017-05-07
+Author: Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+Maintainer: Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+Depends: R (>= 3.0.1), bit (>= 1.1-12), utils, methods, stats
+Description: 
+ Package 'bit64' provides serializable S3 atomic 64bit (signed) integers. 
+ These are useful for handling database keys and exact counting in +-2^63.
+ WARNING: do not use them as replacement for 32bit integers, integer64 are not
+ supported for subscripting by R-core and they have different semantics when 
+ combined with double, e.g. integer64 + double => integer64. 
+ Class integer64 can be used in vectors, matrices, arrays and data.frames. 
+ Methods are available for coercion from and to logicals, integers, doubles, 
+ characters and factors as well as many elementwise and summary functions. 
+ Many fast algorithmic operations such as 'match' and 'order' support inter-
+ active data exploration and manipulation and optionally leverage caching.
+License: GPL-2
+LazyLoad: yes
+ByteCompile: yes
+URL: http://ff.r-forge.r-project.org/
+Encoding: UTF-8
+NeedsCompilation: yes
+Packaged: 2017-05-07 19:07:42 UTC; jo
+Repository: CRAN
+Date/Publication: 2017-05-08 13:21:40 UTC
diff --git a/MD5 b/MD5
new file mode 100644
index 0000000..6f946d4
--- /dev/null
+++ b/MD5
@@ -0,0 +1,66 @@
+73acb68c2bc5834db092ce54dece63e2 *DESCRIPTION
+622cf5defd3be87dd47ae8c89b4a1245 *NAMESPACE
+8076b58a4b8357cfc81280f3b95dd554 *NEWS
+0164771b7a15581e427accde14e3cd6e *R/cache.R
+0af32b8e421b56197218cca4dc4ff5c4 *R/hash64.R
+4b967dded58d570879b4003cf430da6f *R/highlevel64.R
+8fb255a6d08a4cfbf6cd03a5bad22089 *R/integer64.R
+d5baf40933b5d09a7e73a457d807f50c *R/patch64.R
+9d0bd33cd932864c60012c87a75a17f9 *R/sort64.R
+1a33c131430da5516effd5a4964b17d6 *R/sortuse64.R
+3f3a915ca58e2492843b8069076e3043 *R/zzz.R
+a5a307c4300ed9ca28df992777ea63c2 *data/benchmark64.data.rda
+f6446dee41e508e0be0c9f9f727e95c1 *data/optimizer64.data.rda
+f4a44e82447e3ecac68ac6d49cb18d34 *exec/make_rd.pl
+23fba14720be94643fac516a872d4909 *exec/prebuild.sh
+43769b6b11285e3965e8596623489833 *inst/ANNOUNCEMENT-0.8.txt
+95d3e36e2e547b1f2d43a1d96ed79bf0 *inst/ANNOUNCEMENT-0.9-Details.txt
+9dcd6f7db12489e423e98069ca9d780e *inst/ANNOUNCEMENT-0.9.txt
+23043832de5e1e9d49916ac1e5c0d2a1 *inst/README_devel.txt
+d7c39609420badb3ffdf0a4c4b11143e *man/as.character.integer64.rd
+02a3d9824f356221353af96f69d2653a *man/as.data.frame.integer64.rd
+55d5fb22185012beb7ef78bc8f3169e9 *man/as.integer64.character.rd
+f810d39205185058ed3de001054caf0b *man/benchmark64.data.rd
+cd9be7b466bb9c83c8491ec82ecad04e *man/benchmark64.rd
+9c058c6ac7da7a6068e97fe5bc9256b4 *man/bit64-package.rd
+f5f7da6ffe036a00f03f057a90161b36 *man/bit64S3.rd
+410f7df4b47d5a475c9a8db7c62fb043 *man/c.integer64.rd
+e093fc73cd9a14d2ffcea5d31d595bc6 *man/cache.rd
+f47da4eb336a52edf35b7129d6adc386 *man/cumsum.integer64.rd
+701ab8ead839c1efd4f253f84ede6591 *man/duplicated.integer64.rd
+a6522d206e62fb34e7bf2509ef9d3f42 *man/extract.replace.integer64.rd
+dc3bc759d6eb67955de19494bcf3242e *man/format.integer64.rd
+53f7e28cd36887ca0b202689c912610b *man/hashcache.rd
+8687c8380eee020e6587992ed81b4c32 *man/hashmap.rd
+3c1ea21f97f3020a22661a4839c5c363 *man/identical.integer64.rd
+6bc257ece53ef163df6c7de011b22c23 *man/is.sorted.integer64.rd
+8b3d27b7cfbdafa29a6671d94db3c206 *man/keypos.rd
+e23e050a2880bf08e4a9f580b519bc51 *man/match.integer64.rd
+877cd41c55a86ed636dd4024275b3a8f *man/optimizer64.data.rd
+22fa82a50370544b72561711e25ae626 *man/plusclass.rd
+508d2b301e75a75b088c78593eb2d938 *man/prank.rd
+6118e1f6084f9c18620fd66438360869 *man/qtile.rd
+cfd14dee5aaf989665444f039266e00f *man/ramsort.integer64.rd
+e716a537cd8581671c549bf887aa9e82 *man/rank.integer64.rd
+c37625f78585dac182e0d43af68c7b6b *man/rep.integer64.rd
+d6ddd25776e5dd6dcd99426f0a0ba048 *man/seq.integer64.rd
+57fac749382c759082d8336aa4f2f79e *man/sort.integer64.rd
+f85072bf8a29df90a523049fc88aac8e *man/sortnut.rd
+fbe2e721426d9a18c1e1422c143757a1 *man/sum.integer64.rd
+b42d4a70236010ee891ab1b64495ac60 *man/table.integer64.rd
+b7c48e95a6b6ec7d56ef5cf604710e8c *man/tiepos.rd
+c0beaf4d0283dda9359d26e387d518e7 *man/unipos.rd
+109afee392e0ceeb76d80515a045b4d9 *man/unique.integer64.rd
+0407a0fa39328691137f73aa75143255 *man/xor.integer64.rd
+d74975403fc135e3bee1d8d3ecd59bc9 *src/Makevars
+55b18979f3a2dc90ca67adead019e20e *src/Makevars.win
+440049706594c42d0d6bc9ce42b1a71f *src/bsearch.c
+ab3637aa195f4f25ac50467ac24de897 *src/bsearch.h
+18a11ecb49dba47ee85c6c0ce8b210b9 *src/cache.c
+7aa1939c5fe2d6107f11f5677513a4b6 *src/hash64.c
+d493e44121d13e4ff061d4b392d1470e *src/init.c
+8a0414a7e6691d82652b7c4fd3bde6cd *src/integer64.c
+df550357a5463f72df7cffff0c1adbed *src/integer64.h
+13888fcd2a9ebc1b43d9e90644ce084b *src/sort64.c
+1fa35d7223e92dec41817543c8110fc5 *src/sort64.h
+6d527b9a1ff89ea4eafdf105645301d7 *src/sortuse64.c
diff --git a/NAMESPACE b/NAMESPACE
new file mode 100644
index 0000000..fc79244
--- /dev/null
+++ b/NAMESPACE
@@ -0,0 +1,552 @@
+# Namespace for bit64 (currently exporting (almost) everything in order to facilitate debugging)
+# S3 atomic 64 bit integers for R
+# (c) 2011 Jens Oehlschägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2012-10-07
+
+useDynLib(bit64, .registration = TRUE, .fixes = "C_")
+
+importFrom(methods, is)
+importFrom(methods, as)
+importFrom(utils, packageDescription)
+importFrom(utils, strOptions)
+importFrom(stats, quantile, median,cor)
+importFrom(graphics, barplot, par, title)
+
+importFrom(bit, setattr)
+importFrom(bit, clone)
+importFrom(bit, repeat.time)
+
+importFrom(bit, xor)
+importFrom(bit, ramsort)
+importFrom(bit, shellsort)
+importFrom(bit, quicksort)
+importFrom(bit, mergesort)
+importFrom(bit, radixsort)
+importFrom(bit, keysort)
+importFrom(bit, ramorder)
+importFrom(bit, shellorder)
+importFrom(bit, quickorder)
+importFrom(bit, mergeorder)
+importFrom(bit, radixorder)
+importFrom(bit, keyorder)
+importFrom(bit, ramsortorder)
+importFrom(bit, shellsortorder)
+importFrom(bit, quicksortorder)
+importFrom(bit, mergesortorder)
+importFrom(bit, radixsortorder)
+importFrom(bit, keysortorder)
+
+importFrom(bit, is.sorted)
+importFrom(bit, na.count)
+importFrom(bit, nvalid)
+importFrom(bit, nunique)
+importFrom(bit, nties)
+
+export(
+
+# == this is a complete list of R functions and metadata of this package sorted by filenames, non-exported functions are commented away ==
+
+# -- patch64.R - patch generics --
+  ":"
+, ":.default"
+, ":.integer64"
+, "is.double"
+, "is.double.default"
+, "is.double.integer64"
+, "match"
+, "match.default"
+, "%in%"
+, "%in%.default"
+, "rank"
+, "rank.default"
+#, "table"
+#, "table.default"
+, "order"
+, "order.default"
+
+
+# -- integer64.R - basic vector handling --
+
+,"binattr"
+,"plusclass"
+,"minusclass"
+
+ ,"integer64"
+ ,"identical.integer64"
+
+,"is.integer64"
+,"as.integer64"
+ 
+,"as.integer64.integer64"
+,"as.integer64.NULL"
+,"as.integer64.character"
+,"as.integer64.double"
+,"as.integer64.integer"
+,"as.integer64.logical"
+,"as.integer64.factor"
+,"NA_integer64_"
+
+,"as.character.integer64"
+,"as.double.integer64"
+,"as.integer.integer64"
+,"as.logical.integer64"
+
+,"as.bitstring"
+,"as.bitstring.integer64"
+
+#inherited: ,"length.integer64"
+,"length<-.integer64"
+
+,"[.integer64"
+,"[[.integer64"
+,"[[<-.integer64"
+,"[<-.integer64"
+
+,"str.integer64"
+,"print.integer64"
+,"format.integer64"
+,"is.vector.integer64"
+# as.vector.integer64 removed as requested by the CRAN maintainer ,"as.vector.integer64"
+,"is.na.integer64"
+,"is.nan.integer64"
+,"is.finite.integer64"
+,"is.infinite.integer64"
+,"all.integer64"
+,"any.integer64"
+
+,"!.integer64"
+,"&.integer64"
+,"|.integer64"
+,"xor.integer64"
+
+,"!=.integer64"
+,"==.integer64"
+,"<.integer64"
+,"<=.integer64"
+,">.integer64"
+,">=.integer64"
+
+,"+.integer64"
+,"-.integer64"
+,"*.integer64"
+,"^.integer64"
+,"/.integer64"
+,"%/%.integer64"
+,"%%.integer64"
+
+,"sign.integer64"
+,"abs.integer64"
+,"sqrt.integer64"
+,"log.integer64"
+,"log2.integer64"
+,"log10.integer64"
+,"floor.integer64"
+,"ceiling.integer64"
+,"trunc.integer64"
+,"round.integer64"
+,"signif.integer64"
+,"scale.integer64"
+
+,"c.integer64"
+,"rep.integer64"
+,"seq.integer64"
+,"cbind.integer64"
+,"rbind.integer64"
+,"as.data.frame.integer64"
+
+,"min.integer64"
+,"max.integer64"
+,"range.integer64"
+,"lim.integer64" # not a method
+,"sum.integer64"
+,"prod.integer64"
+
+,"diff.integer64"
+,"cummin.integer64"
+,"cummax.integer64"
+,"cumsum.integer64"
+,"cumprod.integer64"
+
+
+
+# -- sort64.R - sorting --
+, sort.integer64
+, order.integer64
+#, ramsort
+, ramsort.integer64
+#, ramorder
+, ramorder.integer64
+#, ramsortorder
+, ramsortorder.integer64
+#, mergesort
+, mergesort.integer64
+#, mergeorder
+, mergeorder.integer64
+#, mergesortorder
+, mergesortorder.integer64
+#, shellsort
+, shellsort.integer64
+#, shellorder
+, shellorder.integer64
+#, shellsortorder
+, shellsortorder.integer64
+#, quicksort
+, quicksort.integer64
+#, quickorder
+, quickorder.integer64
+#, quicksortorder
+, quicksortorder.integer64
+#, radixsort
+, radixsort.integer64
+#, radixsortorder
+, radixsortorder.integer64
+#, radixorder
+, radixorder.integer64
+
+# -- sortmerge64.R - searching & merging --
+
+, sortnut
+, sortnut.integer64
+, ordernut
+, ordernut.integer64
+, sortfin
+, sortfin.integer64
+, orderfin
+, orderfin.integer64
+, orderpos
+, orderpos.integer64
+, sortorderpos
+, sortorderpos.integer64
+, sortuni
+, sortuni.integer64
+, orderuni
+, orderuni.integer64
+, sortorderuni
+, sortorderuni.integer64
+, orderupo
+, orderupo.integer64
+, sortorderupo
+, sortorderupo.integer64
+, ordertie
+, ordertie.integer64
+, sortordertie
+, sortordertie.integer64
+, orderdup
+, orderdup.integer64
+, sortorderdup
+, sortorderdup.integer64
+, sorttab
+, sorttab.integer64
+, ordertab
+, ordertab.integer64
+, sortordertab
+, sortordertab.integer64
+, orderrnk
+, orderrnk.integer64
+, sortorderrnk
+, sortorderrnk.integer64
+, sortqtl
+, sortqtl.integer64
+, orderqtl
+, orderqtl.integer64
+, orderkey
+, orderkey.integer64
+, sortorderkey
+, sortorderkey.integer64
+
+# -- hash64.R - matching --
+
+, hashfun
+, hashfun.integer64
+, hashmap
+, hashmap.integer64
+, hashmaptab
+, hashmaptab.integer64
+, hashmapuni
+, hashmapuni.integer64
+, hashmapupo
+, hashmapupo.integer64
+, hashpos
+, hashpos.cache_integer64
+, hashrev
+, hashrev.cache_integer64
+, hashfin
+, hashfin.cache_integer64
+, hashrin
+, hashrin.cache_integer64
+, hashdup
+, hashdup.cache_integer64
+, hashuni
+, hashuni.cache_integer64
+, hashupo
+, hashupo.cache_integer64
+, hashtab
+, hashtab.cache_integer64
+
+# -- cache.R - matching --
+
+, still.identical
+, newcache
+, jamcache
+, cache
+, setcache
+, getcache
+, remcache
+, print.cache
+, hashcache
+, sortcache
+, ordercache
+, sortordercache
+, na.count.integer64
+, nvalid.integer64
+, nunique.integer64
+, nties.integer64
+, is.sorted.integer64
+
+
+# -- highlevel64.R - matching --
+
+, benchmark64
+, optimizer64
+, match.integer64
+, "%in%.integer64"
+, unique.integer64
+, unipos
+, unipos.integer64
+, tiepos
+, tiepos.integer64
+, keypos
+, keypos.integer64
+, duplicated.integer64
+, table.integer64
+, rank.integer64
+, prank
+, prank.integer64
+, qtile
+, qtile.integer64
+, quantile.integer64
+, median.integer64
+, mean.integer64
+, summary.integer64
+
+# -- zzz.R --
+
+#,.Last.lib
+)
+
+
+# -- patch64.R - patch generics --
+
+S3method(":", default)
+S3method(":", integer64)
+S3method("is.double", default)
+S3method("is.double", integer64)
+S3method("match", default)
+S3method("%in%", default)
+S3method("rank", default)
+#S3method("table", default)
+S3method("order", default)
+
+
+# -- integer64.R - basic vector handling --
+
+S3method("identical", integer64)
+
+S3method("is", integer64)
+S3method("as", integer64)
+
+S3method("as.integer64", integer64)
+S3method("as.integer64", NULL)
+S3method("as.integer64", character)
+S3method("as.integer64", double)
+S3method("as.integer64", integer)
+S3method("as.integer64", logical)
+S3method("as.integer64", factor)
+
+S3method("as.character", integer64)
+S3method("as.double", integer64)
+S3method("as.integer", integer64)
+S3method("as.logical", integer64)
+
+S3method("as", bitstring)
+S3method("as.bitstring", integer64)
+
+#inherited: S3method("length", integer64)
+S3method("length<-", integer64)
+
+S3method("[", integer64)
+S3method("[[", integer64)
+S3method("[[<-", integer64)
+S3method("[<-", integer64)
+
+S3method("str", integer64)
+S3method("print", integer64)
+S3method("format", integer64)
+S3method("is.vector", integer64)
+# as.vector.integer64 removed as requested by the CRAN maintainer S3method("as.vector", integer64)
+S3method("is.na", integer64)
+S3method("is.nan", integer64)
+S3method("is.finite", integer64)
+S3method("is.infinite", integer64)
+S3method("all", integer64)
+S3method("any", integer64)
+
+S3method("!", integer64)
+S3method("&", integer64)
+S3method("|", integer64)
+S3method("xor", integer64)
+
+S3method("!=", integer64)
+S3method("==", integer64)
+S3method("<", integer64)
+S3method("<=", integer64)
+S3method(">", integer64)
+S3method(">=", integer64)
+
+S3method("+", integer64)
+S3method("-", integer64)
+S3method("*", integer64)
+S3method("^", integer64)
+S3method("/", integer64)
+S3method("%/%", integer64)
+S3method("%%", integer64)
+
+S3method("sign", integer64)
+S3method("abs", integer64)
+S3method("sqrt", integer64)
+S3method("log", integer64)
+S3method("log2", integer64)
+S3method("log10", integer64)
+S3method("floor", integer64)
+S3method("ceiling", integer64)
+S3method("trunc", integer64)
+S3method("round", integer64)
+S3method("signif", integer64)
+S3method("scale", integer64)
+
+S3method("c", integer64)
+S3method("rep", integer64)
+S3method("seq", integer64)
+#S3method(":", default)
+#S3method(":", integer64)
+S3method("cbind", integer64)
+S3method("rbind", integer64)
+S3method("as.data.frame", integer64)
+
+S3method("min", integer64)
+S3method("max", integer64)
+S3method("range", integer64)
+S3method("sum", integer64)
+S3method("prod", integer64)
+
+S3method("diff", integer64)
+S3method("cummin", integer64)
+S3method("cummax", integer64)
+S3method("cumsum", integer64)
+S3method("cumprod", integer64)
+
+
+# -- sort64.R - sorting --
+
+S3method("sort", integer64)
+S3method("order", integer64)
+S3method("ramsort", integer64)
+S3method("ramorder", integer64)
+S3method("ramsortorder", integer64)
+S3method("shellsort", integer64)
+S3method("shellorder", integer64)
+S3method("shellsortorder", integer64)
+S3method("mergesort", integer64)
+S3method("mergeorder", integer64)
+S3method("mergesortorder", integer64)
+S3method("quicksort", integer64)
+S3method("quickorder", integer64)
+S3method("quicksortorder", integer64)
+S3method("radixsort", integer64)
+S3method("radixsortorder", integer64)
+S3method("radixorder", integer64)
+
+
+# -- sortmerge64.R - searching & merging --
+S3method("sortnut", integer64)
+S3method("ordernut", integer64)
+
+S3method("sortfin", integer64)
+S3method("orderfin", integer64)
+
+S3method("orderpos", integer64)
+S3method("sortorderpos", integer64)
+
+S3method("sortuni", integer64)
+S3method("orderuni", integer64)
+S3method("sortorderuni", integer64)
+
+S3method("orderupo", integer64)
+S3method("sortorderupo", integer64)
+
+S3method("ordertie", integer64)
+S3method("sortordertie", integer64)
+
+S3method("orderdup", integer64)
+S3method("sortorderdup", integer64)
+
+S3method("sorttab", integer64)
+S3method("ordertab", integer64)
+S3method("sortordertab", integer64)
+
+S3method("orderkey", integer64)
+S3method("sortorderkey", integer64)
+
+S3method("orderrnk", integer64)
+S3method("sortorderrnk", integer64)
+
+S3method("sortqtl", integer64)
+S3method("orderqtl", integer64)
+
+
+# -- hash64.R - matching --
+
+S3method("hashfun", integer64)
+S3method("hashmap", integer64)
+S3method("hashmaptab", integer64)
+S3method("hashmapuni", integer64)
+S3method("hashmapupo", integer64)
+S3method("hashpos", cache_integer64)
+S3method("hashrev", cache_integer64)
+S3method("hashfin", cache_integer64)
+S3method("hashrin", cache_integer64)
+S3method("hashdup", cache_integer64)
+S3method("hashuni", cache_integer64)
+S3method("hashupo", cache_integer64)
+S3method("hashtab", cache_integer64)
+
+# -- cache.R - matching --
+
+S3method("print", cache)
+S3method("na.count", integer64)
+S3method("nvalid", integer64)
+S3method("nunique", integer64)
+S3method("nties", integer64)
+S3method("is.sorted", integer64)
+
+# -- highlevel64.R - matching --
+
+S3method("match", integer64)
+S3method("%in%", integer64)
+S3method("unique", integer64)
+S3method("unipos", integer64)
+S3method("tiepos", integer64)
+S3method("keypos", integer64)
+S3method("duplicated", integer64)
+#S3method("table", integer64)
+S3method("rank", integer64)
+S3method("prank", integer64)
+S3method("qtile", integer64)
+S3method("quantile", integer64)
+S3method("median", integer64)
+S3method("mean", integer64)
+S3method("summary", integer64)
+
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..e6a7457
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,143 @@
+    CHANGES IN bit64 VERSION 0.9-7
+
+BUG FIXES
+
+    o All .Call routines are now registered
+
+
+
+    CHANGES IN bit64 VERSION 0.9-6
+
+NEW FEATURES
+
+    o New method str.integer64 shows the integer64 
+      and no longer the underlying double
+      (wish of Matt Dowle)
+    o New integer64 methods is.nan, is.finite, is.infinite
+      (wish of Florent Angly
+
+
+USER VISIBLE CHANGES
+
+    o as.integer64.double and as.double.integer64
+      now have an argument keep.names=FALSE
+      (wish of Dirk Edelbüttel and Leonardo Silvestri)
+
+	
+BUG FIXES
+
+    o We now protect our SEXP return-vector before calling R_Busy
+      (reported by Thjomas Kalibera)
+    o median.integer64 now gets a ... argument if the generic has it
+      (wish of Kurt Hornik)
+    o we migrated all files to UTF-8
+
+    
+
+    CHANGES IN bit64 VERSION 0.9-5
+
+USER VISIBLE CHANGES
+
+    o The following functions are converted to S3 generics and mask package:base
+      :, is.double, match, %in%, rank, order
+    o NA_integer64_ is now available and exported
+
+	
+BUG FIXES
+
+    o ramsort.integer64 no longer complains about misssing return 
+      value when stable || optimize == "time" (reported by Dan Southern)
+    o removed a harmless warning on request of CRAN maintainers
+      gcc had complained about using %lli format which is not 
+      supported under the windows MCPP compiler, under which
+      %lli and thus as.character.integer64 will fail.
+    o now uses R's RNG instead of the system RNG
+
+    
+
+    CHANGES IN bit64 VERSION 0.9-4
+
+BUG FIXES
+
+    o The packages now uses clone(x) instead of x[]
+    o log(x) tests no longer fail under valgrind
+      (Thanks to Heisenberg it only failed under Valgrind)
+    o UBSAN should no longer complain about left shift
+
+    
+
+    CHANGES IN bit64 VERSION 0.9-3
+
+USER VISIBLE CHANGES
+
+    o The following functions are converted to S3 generics and mask package:base
+      :, is.double, match, %in%, rank, order
+    o table.integer64 now automatically converts non-integer64 arguments to integer64
+      rather than stopping on error (but gives a warning for each column) 
+    o table.integer64 called with return="table" returns empty cells now with 
+      0 rather than NA
+    o %in%.integer64 no longer has arguments 'nunique' and 'method' in order
+      to match the generic with only two arguments 'x', 'table' and ...
+
+
+BUG FIXES
+
+    o c(x,x,x) failed with integer64 type because R no longer copies the 
+      arguments in list(...) as from R-3.0.2 . Presumably now the ugly 
+      workaround in table.integer64 is no longer needed but that has NOT 
+      been fixed yet
+    o round.integer64 no longer removes the "integer64" class attribute
+      (reported by Dan Southern)
+
+    
+
+    CHANGES IN bit64 VERSION 0.9-2
+
+BUG FIXES
+
+    o match.integer64 (and %in%.integer64) now call correctly with 
+      method="hashpos" and method="hashrev"
+    o removed platform specific timing code that was not needed 
+      and prevented compiling under MacOS
+
+
+    
+
+    CHANGES IN bit64 VERSION 0.9-1
+
+    
+NEW FEATURES
+
+    o new methods for 'match', '%in%', 'duplicated', 'unique', 'table'
+      , 'sort', 'order', 'rank', 'quantile', 'median' and 'summary'
+    o new generics and methods for data management: 
+        'unipos' (positions of the unique values)
+      , 'tiepos' (positions of ties)
+      , 'keypos' (positions of values in a sorted unique table) 
+      and derived methods 'as.factor' and 'as.ordered'
+    o new generic caching scheme, see ?cache and ?hashcache
+    o new low level functions for fast sorting, ordering and hashing,
+    see ?sortnut and ?hashmap
+
+    
+    
+USER VISIBLE CHANGES
+
+    o the package is back on CRAN. Method 'as.vector.integer64' has been removed 
+      at request of the CRAN maintainer. The starting point for this request was: 
+      'matrix(integer64())' does not work. The result of removing 
+      'as.vector.integer64' is a deterioration: 'array(integer64())' does not work 
+      anymore. You can restore 'as.vector.integer64' if you prefer.
+    o package 'bit64' now shares generics for low-level sorting with package 'ff' 
+      and depends on package 'bit' for those generics
+      
+
+
+      
+    CHANGES IN bit64 VERSION 0.8-3
+
+
+FIXES
+
+    o removed chonst char warning (thanks to Murray Stokely)
+    o reduced R dependency down to version 2.12.1 (wish of Murray Stokely)
diff --git a/R/cache.R b/R/cache.R
new file mode 100644
index 0000000..ceea275
--- /dev/null
+++ b/R/cache.R
@@ -0,0 +1,467 @@
+# /*
+# R-Code for caching
+# S3 atomic 64bit integers for R
+# (c) 2011 Jens Oehlschägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+# */
+
+#! \name{cache}
+#! \alias{cache}
+#! \alias{newcache}
+#! \alias{jamcache}
+#! \alias{setcache}
+#! \alias{getcache}
+#! \alias{remcache}
+#! \alias{print.cache}
+#! \alias{still.identical}
+#! \title{
+#! 	Atomic Caching
+#! }
+#! \description{
+#! 	Functions for caching results attached to atomic objects
+#! }
+#! \usage{
+#! newcache(x)
+#! jamcache(x)
+#! cache(x)
+#! setcache(x, which, value)
+#! getcache(x, which)
+#! remcache(x)
+#! \method{print}{cache}(x, all.names = FALSE, pattern, \dots)
+#! still.identical(x, y)
+#! }
+#! \arguments{
+#!   \item{x}{
+#!   an integer64 vector (or a cache object in case of \code{print.cache})
+#! }
+#!   \item{y}{
+#!   an integer64 vector
+#! }
+#!   \item{which}{
+#!   A character naming the object to be retrieved from the cache or to be stored in the cache
+#! }
+#!   \item{value}{
+#!   An object to be stored in the cache 
+#! }
+#!   \item{all.names}{
+#!   passed to \code{\link{ls}} when listing the cache content
+#! }
+#!   \item{pattern}{
+#!   passed to \code{\link{ls}} when listing the cache content
+#! }
+#!   \item{\dots}{
+#! 	ignored
+#! }
+#! }
+#! \details{
+#! 	A \code{cache} is an \code{link{environment}} attached to an atomic object with the \code{link{attrib}} name 'cache'. 
+#! 	It contains at least a reference to the atomic object that carries the cache. 
+#! 	This is used when accessing the cache to detect whether the object carrying the cache has been modified meanwhile.
+#! 	Function \code{still.identical(x,y)} checks whether the objects \code{x} and \code{y} \cr
+#! 	Function \code{newcache(x)} creates a new cache referencing  \code{x} \cr
+#! 	Function \code{jamcache(x)} forces \code{x} to have a cache \cr
+#! 	Function \code{cache(x)} returns the cache attached to \code{x} if it is not found to be outdated \cr
+#! 	Function \code{setcache(x, which, value)} assigns a value into the cache of \code{x} \cr
+#! 	Function \code{getcache(x, which)} gets cache value 'which' from \code{x} \cr
+#! 	Function \code{remcache} removes the cache from \code{x} \cr
+#! }
+#! \value{
+#! 	see details
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \seealso{
+#! 	Functions that get and set small cache-content automatically when a cache is present: \code{\link{na.count}}, \code{\link{nvalid}}, \code{\link{is.sorted}}, \code{\link{nunique}} and \code{\link{nties}} \cr
+#! 	Setting big caches with a relevant memory footprint requires a conscious decision of the user: \code{\link{hashcache}}, \code{\link{sortcache}}, \code{\link{ordercache}} and \code{\link{sortordercache}} \cr
+#! 	Functions that use big caches: \code{\link{match.integer64}}, \code{\link{\%in\%.integer64}}, \code{\link{duplicated.integer64}}, \code{\link{unique.integer64}}, \code{\link{unipos}}, \code{\link{table.integer64}}, \code{\link{as.factor.integer64}}, \code{\link{as.ordered.integer64}}, \code{\link{keypos}}, \code{\link{tiepos}}, \code{\link{rank.integer64}}, \code{\link{prank}}, \code{\link{qtile}}, \code{\link{quantile.integer64}}, \code{\link{median.integer64}} and \code{\link{summa [...]
+#! }
+#! \examples{
+#! 	x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#! 	y <- x
+#! 	still.identical(x,y)
+#! 	y[1] <- NA
+#! 	still.identical(x,y)
+#! 	mycache <- newcache(x)
+#! 	ls(mycache)
+#! 	mycache
+#! 	rm(mycache)
+#! 	jamcache(x)
+#! 	cache(x)
+#! 	x[1] <- NA
+#! 	cache(x)
+#! 	getcache(x, "abc")
+#! 	setcache(x, "abc", 1)
+#! 	getcache(x, "abc")
+#! 	remcache(x)
+#! 	cache(x)
+#! }
+#! \keyword{ environment }
+
+still.identical <- function(x, y){
+  .Call(C_r_ram_truly_identical, x = x, y = y, PACKAGE = "bit64")
+}
+
+newcache <- function(x){
+	env <- new.env()
+	vmode <- typeof(x)
+	if (vmode=="double" && is.integer64(x))
+	  vmode <- "integer64"
+	setattr(env, "class", c(paste("cache", vmode, sep="_"),"cache","environment"))
+	assign("x", x, envir=env)
+	env
+}
+
+jamcache <- function(x){
+	cache <- attr(x, "cache")
+	if (is.null(cache)){
+		cache <- newcache(x)
+		setattr(x, "cache", cache)
+	}else
+		if (!still.identical(x, get("x", envir=cache, inherits=FALSE))){
+			cache <- newcache(x)
+			setattr(x, "cache", cache)
+			warning("replaced outdated cache with empty cache")
+		}
+	cache
+}
+
+cache <- function(x){
+	cache <- attr(x, "cache")
+	if (is.null(cache) || still.identical(x, get("x", envir=cache, inherits=FALSE)))
+		cache
+	else{ 
+		remcache(x)
+		warning("removed outdated cache")
+		NULL
+	}
+}
+
+setcache <- function(x, which, value){
+	  env <- jamcache(x)
+	  assign(which, value, envir=env)
+	  env
+}
+
+getcache <- function(x, which){
+	cache <- attr(x, "cache")
+	if (is.null(cache))
+	  return(NULL)
+	if (still.identical(x, get("x", envir=cache, inherits=FALSE))){
+		if (exists(which, envir=cache, inherits=FALSE))
+			get(which, envir=cache, inherits=FALSE)
+		else
+			NULL
+	}else{ 
+		remcache(x)
+		warning("removed outdated cache")
+		NULL
+	}
+}
+
+remcache <- function(x){
+		setattr(x, "cache", NULL)
+	invisible()
+}
+
+print.cache<- function(x, all.names=FALSE, pattern, ...){
+  l <- ls(x, all.names, pattern=pattern)
+  cat(class(x)[1], ": ", paste(l, collapse=" - "), "\n", sep="")
+  invisible(l)
+}
+
+
+#! \name{hashcache}
+#! \alias{hashcache}
+#! \alias{sortcache}
+#! \alias{sortordercache}
+#! \alias{ordercache}
+#! \title{
+#! 		Big caching of hashing, sorting, ordering
+#! }
+#! \description{
+#! 	Functions to create cache that accelerates many operations
+#! }
+#! \usage{
+#! hashcache(x, nunique=NULL, \dots)
+#! sortcache(x, has.na = NULL)
+#! sortordercache(x, has.na = NULL, stable = NULL)
+#! ordercache(x, has.na = NULL, stable = NULL, optimize = "time")
+#! }
+#! \arguments{
+#!   \item{x}{
+#! 		an atomic vector (note that currently only integer64 is supported)
+#! }
+#!   \item{nunique}{ giving \emph{correct} number of unique elements can help reducing the size of the hashmap }
+#!   \item{has.na}{
+#! boolean scalar defining whether the input vector might contain \code{NA}s. If we know we don't have NAs, this may speed-up.
+#! \emph{Note} that you risk a crash if there are unexpected \code{NA}s with \code{has.na=FALSE}
+#! }
+#!   \item{stable}{
+#! boolean scalar defining whether stable sorting is needed. Allowing non-stable may speed-up.
+#! }
+#!   \item{optimize}{
+#! by default ramsort optimizes for 'time' which requires more RAM,
+#! set to 'memory' to minimize RAM requirements and sacrifice speed
+#! }
+#!   \item{\dots}{
+#! 		passed to \code{\link{hashmap}}
+#! }
+#! }
+#! \details{
+#! 	The results of the relatively expensive operations \code{\link{hashmap}}, \code{\link{ramsort}}, \code{\link{ramsortorder}} and \code{\link{ramorder}} can be stored in a cache in order to avoid multiple executions. Except in very specific situations, the recommended method is \code{sortordercache} only.
+#! }
+#! \note{
+#!   Note that we consider storing the big results from sorting and/or ordering as a relevant side-effect, 
+#! and therefore storing them in the cache should require a conscious decision of the user.
+#! }
+#! \value{
+#! 	\code{x} with a \code{\link{cache}} that contains the result of the expensive operations, possible together with small derived information (such as \code{\link{nunique.integer64}}) and previously cached results.
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \seealso{
+#! 	\code{\link{cache}} for caching functions and \code{\link{nunique}} for methods benefiting from small caches
+#! }
+#! \examples{
+#! 	x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#!  sortordercache(x)
+#! }
+#! \keyword{ environment }
+
+# Build a hashmap for 'x' and store it in the cache of 'x'; also caches
+# na.count as a cheap side effect. If nunique was unknown and the data
+# turn out to have few unique values, the hashmap is rebuilt smaller.
+hashcache <- function(x, nunique=NULL, ...){
+	cenv <- jamcache(x)
+	if (is.null(nunique))
+		nunique <- cenv$nunique
+	cenv <- hashmap(x, nunique=nunique, cache=cenv, ...)
+	# only rebuild when nunique was not known beforehand (still NULL here)
+	if (is.null(nunique) && cenv$nunique < sqrt(length(x)))
+		cenv <- hashmap(x, nunique=cenv$nunique, cache=cenv, ...)
+	na.count(x)	# x now has a cache, so na.count() memoizes unless already cached
+	# unlike sortcache/ordercache/sortordercache we do not set nties: hashtab is too expensive
+	invisible(cenv)
+}
+
+# Cache a sorted copy of 'x' together with the derived scalars
+# na.count, nunique and nties; returns 'x' invisibly.
+sortcache <- function(x, has.na = NULL){
+	if (is.null(has.na)){
+		nac <- getcache(x, "na.count")
+		has.na <- if (is.null(nac)) TRUE else nac > 0
+	}
+	s <- clone(x)
+	nac <- ramsort(s, has.na = has.na, na.last = FALSE, decreasing = FALSE, stable = FALSE, optimize = "time")
+	nut <- .Call(C_r_ram_integer64_sortnut, x = s, PACKAGE = "bit64")
+	setcache(x, "sort", s)
+	setcache(x, "na.count", nac)
+	setcache(x, "nunique", nut[[1]])
+	setcache(x, "nties", nut[[2]])
+	invisible(x)
+}
+
+
+# Cache both a sorted copy and the ordering permutation of 'x', together
+# with the derived scalars na.count, nunique and nties; returns 'x' invisibly.
+sortordercache <- function(x, has.na = NULL, stable = NULL){
+	if (is.null(has.na)){
+		nac <- getcache(x, "na.count")
+		has.na <- if (is.null(nac)) TRUE else nac > 0
+	}
+	if (is.null(stable)){
+		nuni <- getcache(x, "nunique")
+		# default to stable sorting unless all values are known to be unique
+		stable <- if (is.null(nuni)) TRUE else nuni < length(x)
+	}
+	s <- clone(x)
+	o <- seq_along(x)
+	nac <- ramsortorder(s, o, has.na = has.na, na.last = FALSE, decreasing = FALSE, stable = stable, optimize = "time")
+	nut <- .Call(C_r_ram_integer64_sortnut, x = s, PACKAGE = "bit64")
+	setcache(x, "sort", s)
+	setcache(x, "order", o)
+	setcache(x, "na.count", nac)
+	setcache(x, "nunique", nut[[1]])
+	setcache(x, "nties", nut[[2]])
+	invisible(x)
+}
+
+
+# Cache only the ordering permutation of 'x' (no sorted copy), together
+# with the derived scalars na.count, nunique and nties; returns 'x' invisibly.
+ordercache <- function(x, has.na = NULL, stable = NULL, optimize = "time"){
+	if (is.null(has.na)){
+		nac <- getcache(x, "na.count")
+		has.na <- if (is.null(nac)) TRUE else nac > 0
+	}
+	if (is.null(stable)){
+		nuni <- getcache(x, "nunique")
+		# default to stable ordering unless all values are known to be unique
+		stable <- if (is.null(nuni)) TRUE else nuni < length(x)
+	}
+	o <- seq_along(x)
+	nac <- ramorder(x, o, has.na = has.na, na.last = FALSE, decreasing = FALSE, stable = stable, optimize = optimize)
+	nut <- .Call(C_r_ram_integer64_ordernut, table = x, order = o, PACKAGE = "bit64")
+	setcache(x, "order", o)
+	setcache(x, "na.count", nac)
+	setcache(x, "nunique", nut[[1]])
+	setcache(x, "nties", nut[[2]])
+	invisible(x)
+}
+
+
+
+#! \name{is.sorted.integer64}
+#! \alias{is.sorted.integer64}
+#! \alias{na.count.integer64}
+#! \alias{nvalid.integer64}
+#! \alias{nunique.integer64}
+#! \alias{nties.integer64}
+#! \title{
+#! 	Small cache access methods
+#! }
+#! \description{
+#! 	These methods are packaged here for methods in packages \code{bit64} and \code{ff}.
+#! }
+#! \usage{
+#! 	\method{is.sorted}{integer64}(x, \dots)
+#! 	\method{na.count}{integer64}(x, \dots)
+#! 	\method{nvalid}{integer64}(x, \dots)
+#! 	\method{nunique}{integer64}(x, \dots)
+#! 	\method{nties}{integer64}(x, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{
+#! 	some object
+#! 	}
+#!   \item{\dots}{
+#! 	ignored
+#! 	}
+#! }
+#! \details{
+#!   All these functions benefit from a \code{\link{sortcache}}, \code{\link{ordercache}} or \code{\link{sortordercache}}.  
+#!   \code{na.count}, \code{nvalid} and \code{nunique} also benefit from a \code{\link{hashcache}}.
+#!	\cr
+#! 	\code{is.sorted} checks for sortedness of \code{x} (NAs sorted first) \cr
+#!  \code{na.count} returns the number of \code{NA}s \cr 
+#!  \code{nvalid} returns the number of valid data points, usually \code{\link{length}} minus \code{na.count}. \cr
+#!  \code{nunique} returns the number of unique values \cr
+#!  \code{nties} returns the number of tied values. 
+#! }
+#! \note{
+#! 	If a \code{\link{cache}} exists but the desired value is not cached, 
+#!  then these functions will store their result in the cache. 
+#!  We do not consider this a relevant side-effect, 
+#!  since these small cache results do not have a relevant memory footprint.
+#! }
+#! \value{
+#! 	\code{is.sorted} returns a logical scalar, the other methods return an integer scalar.
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \seealso{
+#! 	\code{\link{cache}} for caching functions and \code{\link{sortordercache}} for functions creating big caches
+#! }
+#! \examples{
+#! 	x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#!  length(x)
+#!  na.count(x)
+#!  nvalid(x)
+#!  nunique(x)
+#!  nties(x)
+#!  table.integer64(x)
+#!  x
+#! }
+#! \keyword{ environment }
+#! \keyword{ methods }
+
+
+# Number of NAs in 'x'; memoized in the cache of 'x' when one exists.
+na.count.integer64 <- function(x, ...){
+	env <- cache(x)
+	if (!is.null(env) && exists("na.count", envir=env, inherits=FALSE))
+		return(get("na.count", envir=env, inherits=FALSE))
+	ret <- .Call(C_r_ram_integer64_nacount, x = x, PACKAGE = "bit64")
+	if (!is.null(env))
+		assign("na.count", ret, envir=env)
+	ret
+}
+
+# Number of valid (non-NA) elements of 'x'.
+nvalid.integer64 <- function(x, ...){
+	n <- length(x)
+	n - na.count(x)
+}
+
+# Cached test whether 'x' is sorted ascending (NAs first); the result is
+# memoized in the cache of 'x' when one exists.
+is.sorted.integer64 <- function(x, ...){
+	env <- cache(x)
+	if (!is.null(env) && exists("is.sorted", envir=env, inherits=FALSE))
+		return(get("is.sorted", envir=env, inherits=FALSE))
+	ret <- .Call(C_r_ram_integer64_issorted_asc, x = x, PACKAGE = "bit64")
+	if (!is.null(env))
+		assign("is.sorted", ret, envir=env)
+	ret
+}
+
+
+# Number of unique values of 'x'; memoized in the cache of 'x' when one
+# exists. Uses the cheap sorted-scan when x is sorted (which also yields
+# nties for free), otherwise builds a temporary hashmap.
+nunique.integer64 <- function(x, ...){
+	env <- cache(x)
+	if (!is.null(env) && exists("nunique", envir=env, inherits=FALSE))
+		return(get("nunique", envir=env, inherits=FALSE))
+	if (is.sorted(x)){
+		nut <- .Call(C_r_ram_integer64_sortnut
+		, x = x
+		, PACKAGE = "bit64"
+		)
+		if (!is.null(env)){
+			assign("nunique", nut[1], envir=env)
+			assign("nties", nut[2], envir=env)	# free by-product of the sorted scan
+		}
+		nut[1]
+	}else{
+		h <- hashmap(x)
+		if (!is.null(env))
+			assign("nunique", h$nunique, envir=env)
+		h$nunique
+	}
+}
+
+# Number of tied values of 'x'; reads the cache when available.
+# FIX (consistency): like na.count/is.sorted/nunique — and as promised by
+# the documentation note on small cache results — the computed value is now
+# memoized in an existing cache (no cache is created if none exists).
+# Also: the dead 'na.count <-' assignment was removed and both branches now
+# use [[2]] for the extraction.
+nties.integer64 <- function(x, ...){
+	cv <- getcache(x, "nties")
+	if (is.null(cv)){
+		if (is.sorted(x)){
+			cv <- .Call(C_r_ram_integer64_sortnut, x = x, PACKAGE = "bit64")[[2]]
+		}else{
+			s <- clone(x)
+			ramsort(s, has.na = TRUE, na.last = FALSE, decreasing = FALSE, stable = FALSE, optimize = "time")
+			cv <- .Call(C_r_ram_integer64_sortnut, x = s, PACKAGE = "bit64")[[2]]
+		}
+		env <- cache(x)
+		if (!is.null(env))
+			assign("nties", cv, envir=env)
+	}
+	cv
+}
+
diff --git a/R/hash64.R b/R/hash64.R
new file mode 100644
index 0000000..d542908
--- /dev/null
+++ b/R/hash64.R
@@ -0,0 +1,410 @@
+# /*
+# R-Code for hashing
+# S3 atomic 64bit integers for R
+# (c) 2011 Jens Oehlschlägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+# */
+
+
+#! \name{hashmap}
+#! \alias{hashfun}
+#! \alias{hashfun.integer64}
+#! \alias{hashmap}
+#! \alias{hashmap.integer64}
+#! \alias{hashpos}
+#! \alias{hashpos.cache_integer64}
+#! \alias{hashrev}
+#! \alias{hashrev.cache_integer64}
+#! \alias{hashfin}
+#! \alias{hashfin.cache_integer64}
+#! \alias{hashrin}
+#! \alias{hashrin.cache_integer64}
+#! \alias{hashdup}
+#! \alias{hashdup.cache_integer64}
+#! \alias{hashuni}
+#! \alias{hashuni.cache_integer64}
+#! \alias{hashmapuni}
+#! \alias{hashmapuni.integer64}
+#! \alias{hashupo}
+#! \alias{hashupo.cache_integer64}
+#! \alias{hashmapupo}
+#! \alias{hashmapupo.integer64}
+#! \alias{hashtab}
+#! \alias{hashtab.cache_integer64}
+#! \alias{hashmaptab}
+#! \alias{hashmaptab.integer64}
+#! \title{
+#!    Hashing for 64bit integers
+#! }
+#! \description{
+#! This is an explicit implementation of hash functionality that underlies 
+#! matching and other functions in R. Explicit means that you can create, 
+#! store and use hash functionality directly. One advantage is that you can
+#! re-use hashmaps, which avoid re-building hashmaps again and again.
+#! }
+#! \usage{
+#! hashfun(x, \dots)
+#! \method{hashfun}{integer64}(x, minfac=1.41, hashbits=NULL, \dots)
+#! hashmap(x, \dots)
+#! \method{hashmap}{integer64}(x, nunique=NULL, minfac=1.41, hashbits=NULL, cache=NULL, \dots)
+#! hashpos(cache, \dots)
+#! \method{hashpos}{cache_integer64}(cache, x, nomatch = NA_integer_, \dots)
+#! hashrev(cache, \dots)
+#! \method{hashrev}{cache_integer64}(cache, x, nomatch = NA_integer_, \dots)
+#! hashfin(cache, \dots)
+#! \method{hashfin}{cache_integer64}(cache, x, \dots)
+#! hashrin(cache, \dots)
+#! \method{hashrin}{cache_integer64}(cache, x, \dots)
+#! hashdup(cache, \dots)
+#! \method{hashdup}{cache_integer64}(cache, \dots)
+#! hashuni(cache, \dots)
+#! \method{hashuni}{cache_integer64}(cache, keep.order=FALSE, \dots)
+#! hashmapuni(x, \dots)
+#! \method{hashmapuni}{integer64}(x, nunique=NULL, minfac=1.5, hashbits=NULL, \dots)
+#! hashupo(cache, \dots)
+#! \method{hashupo}{cache_integer64}(cache, keep.order=FALSE, \dots)
+#! hashmapupo(x, \dots)
+#! \method{hashmapupo}{integer64}(x, nunique=NULL, minfac=1.5, hashbits=NULL, \dots)
+#! hashtab(cache, \dots)
+#! \method{hashtab}{cache_integer64}(cache, \dots)
+#! hashmaptab(x, \dots)
+#! \method{hashmaptab}{integer64}(x, nunique=NULL, minfac=1.5, hashbits=NULL, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{ an integer64 vector }
+#!   \item{hashmap}{ an object of class 'hashmap' i.e. here 'cache_integer64' }
+#!   \item{minfac}{ minimum factor by which the hashmap has more elements compared to the data \code{x}, ignored if \code{hashbits} is given directly }
+#!   \item{hashbits}{ length of hashmap is \code{2^hashbits} }
+#!   \item{cache}{ an optional \code{\link{cache}} object into which to put the hashmap (by default a new cache is created)}
+#!   \item{nunique}{ giving \emph{correct} number of unique elements can help reducing the size of the hashmap }
+#!   \item{nomatch}{ the value to be returned if an element is not found in the hashmap }
+#!   \item{keep.order}{ determines order of results and speed: \code{FALSE} (the default) is faster and returns in the (pseudo)random order of the hash function, \code{TRUE} returns in the order of first appearance in the original data, but this requires extra work } 
+#!   \item{\dots}{ further arguments, passed from generics, ignored in methods }
+#! }
+#! \details{
+#! \tabular{rrl}{
+#!    \bold{function} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{hashfun} \tab \code{\link[digest]{digest}} \tab export of the hash function used in \code{hashmap} \cr
+#!    \code{hashmap} \tab \code{\link[=match.integer64]{match}} \tab return hashmap \cr
+#!    \code{hashpos} \tab \code{\link[=match.integer64]{match}} \tab return positions of \code{x} in \code{hashmap} \cr
+#!    \code{hashrev} \tab \code{\link[=match.integer64]{match}} \tab return positions of \code{hashmap} in \code{x} \cr
+#!    \code{hashfin} \tab \code{\link{\%in\%.integer64}} \tab return logical whether \code{x} is in \code{hashmap} \cr
+#!    \code{hashrin} \tab \code{\link{\%in\%.integer64}} \tab return logical whether \code{hashmap} is in \code{x}  \cr
+#!    \code{hashdup} \tab \code{\link[=duplicated.integer64]{duplicated}} \tab return logical whether hashdat is duplicated using hashmap\cr
+#!    \code{hashuni} \tab \code{\link[=unique.integer64]{unique}} \tab return unique values of hashmap  \cr
+#!    \code{hashmapuni} \tab \code{\link[=unique.integer64]{unique}} \tab return unique values of \code{x}  \cr
+#!    \code{hashupo} \tab \code{\link[=unique.integer64]{unique}} \tab return positions of unique values in hashdat \cr
+#!    \code{hashmapupo} \tab \code{\link[=unique.integer64]{unique}} \tab return positions of unique values in \code{x} \cr
+#!    \code{hashtab} \tab \code{\link[=table.integer64]{table}} \tab tabulate values of hashdat using hashmap in \code{keep.order=FALSE} \cr
+#!    \code{hashmaptab} \tab \code{\link[=table.integer64]{table}} \tab tabulate values of \code{x} building the hashmap on the fly in \code{keep.order=FALSE}\cr
+#! }
+#! }
+#! \value{
+#!   see details
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ programming }
+#! \keyword{ manip }
+#! \seealso{ \code{\link[=match.integer64]{match}} }
+#! \examples{
+#! x <- as.integer64(sample(c(NA, 0:9)))
+#! y <- as.integer64(sample(c(NA, 1:9), 10, TRUE))
+#! hashfun(y)
+#! hx <- hashmap(x)
+#! hy <- hashmap(y)
+#! ls(hy)
+#! hashpos(hy, x)
+#! hashrev(hx, y)
+#! hashfin(hy, x)
+#! hashrin(hx, y)
+#! hashdup(hy)
+#! hashuni(hy)
+#! hashuni(hy, keep.order=TRUE)
+#! hashmapuni(y)
+#! hashupo(hy)
+#! hashupo(hy, keep.order=TRUE)
+#! hashmapupo(y)
+#! hashtab(hy)
+#! hashmaptab(y)
+#! 
+#! stopifnot(identical(match(as.integer(x),as.integer(y)),hashpos(hy, x)))
+#! stopifnot(identical(match(as.integer(x),as.integer(y)),hashrev(hx, y)))
+#! stopifnot(identical(as.integer(x) \%in\% as.integer(y), hashfin(hy, x)))
+#! stopifnot(identical(as.integer(x) \%in\% as.integer(y), hashrin(hx, y)))
+#! stopifnot(identical(duplicated(as.integer(y)), hashdup(hy)))
+#! stopifnot(identical(as.integer64(unique(as.integer(y))), hashuni(hy, keep.order=TRUE)))
+#! stopifnot(identical(sort(hashuni(hy, keep.order=FALSE)), sort(hashuni(hy, keep.order=TRUE))))
+#! stopifnot(identical(y[hashupo(hy, keep.order=FALSE)], hashuni(hy, keep.order=FALSE)))
+#! stopifnot(identical(y[hashupo(hy, keep.order=TRUE)], hashuni(hy, keep.order=TRUE)))
+#! stopifnot(identical(hashpos(hy, hashuni(hy, keep.order=TRUE)), hashupo(hy, keep.order=TRUE)))
+#! stopifnot(identical(hashpos(hy, hashuni(hy, keep.order=FALSE)), hashupo(hy, keep.order=FALSE)))
+#! stopifnot(identical(hashuni(hy, keep.order=FALSE), hashtab(hy)$values))
+#! stopifnot(identical(as.vector(table(as.integer(y), useNA="ifany"))
+#! , hashtab(hy)$counts[order.integer64(hashtab(hy)$values)]))
+#! stopifnot(identical(hashuni(hy, keep.order=TRUE), hashmapuni(y)))
+#! stopifnot(identical(hashupo(hy, keep.order=TRUE), hashmapupo(y)))
+#! stopifnot(identical(hashtab(hy), hashmaptab(y)))
+#!
+#! 	\dontrun{
+#! 	message("explore speed given size of the hasmap in 2^hashbits and size of the data")
+#! 	message("more hashbits means more random access and less collisions")
+#! 	message("i.e. more data means less random access and more collisions")
+#! 	bits <- 24
+#! 	b <- seq(-1, 0, 0.1)
+#! 	tim <- matrix(NA, length(b), 2, dimnames=list(b, c("bits","bits+1")))
+#!     for (i in 1:length(b)){
+#! 	  n <- as.integer(2^(bits+b[i]))
+#! 	  x <- as.integer64(sample(n))
+#! 	  tim[i,1] <- repeat.time(hashmap(x, hashbits=bits))[3]
+#! 	  tim[i,2] <- repeat.time(hashmap(x, hashbits=bits+1))[3]
+#! 	  print(tim)
+#!       matplot(b, tim)
+#! 	}
+#! 	message("we conclude that n*sqrt(2) is enough to avoid collisions")
+#! 	}
+#! }
+
+
+# Generic and integer64 method exporting the hash function used by hashmap:
+# returns an integer vector of hash slots, one per element of 'x'.
+hashfun <- function(x, ...)UseMethod("hashfun")
+hashfun.integer64 <- function(x, minfac=1.41, hashbits=NULL, ...){
+  nx <- length(x)
+  if (is.null(hashbits)){
+    # enough bits for a table minfac times larger than the data
+    hashbits <- as.integer(ceiling(log2(ceiling(nx*minfac))))
+  }else{
+    hashbits <- as.integer(hashbits)
+  }
+  slots <- integer(nx)
+  .Call(C_hashfun_integer64, x, hashbits, slots, PACKAGE = "bit64")
+  slots
+}
+
+# Generic and integer64 method building a hashmap of 'x' and storing it
+# (hashmap, hashbits, nhash, nunique) in a cache environment, which is
+# returned. An existing cache may be supplied via 'cache' but must still
+# refer to the same vector 'x'.
+hashmap <- function(x, ...)UseMethod("hashmap")
+hashmap.integer64 <- function(x, nunique=NULL, minfac=1.41, hashbits=NULL, cache=NULL, ...){
+  if (is.null(nunique)){
+    nunique <- integer(1)	# output parameter, filled in by the C code
+    n <- length(x)
+  }else{
+    n <- nunique <- as.integer(nunique)
+  }
+  hashbits <- if (is.null(hashbits)) as.integer(ceiling(log2(n*minfac))) else as.integer(hashbits)
+  nhash <- as.integer(2^hashbits)
+  hashmap <- integer(nhash)
+  .Call(C_hashmap_integer64, x, hashbits, hashmap, nunique, PACKAGE = "bit64")
+  if (is.null(cache)){
+    cache <- newcache(x)
+  }else if (!still.identical(x, get("x", envir=cache, inherits=FALSE))){
+    stop("vector 'x' dissociated from cache")
+  }
+  assign("hashmap", hashmap, envir=cache)
+  assign("hashbits", hashbits, envir=cache)
+  assign("nhash", nhash, envir=cache)
+  assign("nunique", nunique, envir=cache)
+  cache
+}
+
+# Positions of the elements of 'x' within the hashed data ('match' semantics);
+# 'nomatch' is returned for elements not found.
+hashpos <- function(cache, ...)UseMethod("hashpos")
+hashpos.cache_integer64 <- function(cache, x, nomatch = NA_integer_, ...){
+  bits <- get("hashbits", envir=cache, inherits=FALSE)
+  map  <- get("hashmap", envir=cache, inherits=FALSE)
+  dat  <- get("x", envir=cache, inherits=FALSE)
+  pos <- integer(length(x))
+  .Call(C_hashpos_integer64, as.integer64(x), dat, bits, map, as.integer(nomatch), pos, PACKAGE = "bit64")
+  pos
+}
+
+# Reverse matching: positions of the hashed data within 'x'; 'nomatch' is
+# returned for elements not found.
+hashrev <- function(cache, ...)UseMethod("hashrev")
+hashrev.cache_integer64 <- function(cache, x, nomatch = NA_integer_, ...){
+  bits <- get("hashbits", envir=cache, inherits=FALSE)
+  map  <- get("hashmap", envir=cache, inherits=FALSE)
+  dat  <- get("x", envir=cache, inherits=FALSE)
+  nuni <- get("nunique", envir=cache, inherits=FALSE)
+  pos <- integer(length(dat))
+  .Call(C_hashrev_integer64, as.integer64(x), dat, bits, map, nuni, as.integer(nomatch), pos, PACKAGE = "bit64")
+  pos
+}
+
+# Logical vector: is each element of 'x' present in the hashed data
+# ('%in%' semantics)?
+hashfin <- function(cache, ...)UseMethod("hashfin")
+hashfin.cache_integer64 <- function(cache, x, ...){
+  bits <- get("hashbits", envir=cache, inherits=FALSE)
+  map  <- get("hashmap", envir=cache, inherits=FALSE)
+  dat  <- get("x", envir=cache, inherits=FALSE)
+  found <- logical(length(x))
+  .Call(C_hashfin_integer64, as.integer64(x), dat, bits, map, found, PACKAGE = "bit64")
+  found
+}
+
+# Reverse '%in%': logical vector telling which elements of the hashed data
+# occur in 'x'.
+hashrin <- function(cache, ...)UseMethod("hashrin")
+hashrin.cache_integer64 <- function(cache, x, ...){
+  hashbits <- get("hashbits", envir=cache, inherits=FALSE)
+  hashmap <- get("hashmap", envir=cache, inherits=FALSE)
+  hashdat <- get("x", envir=cache, inherits=FALSE)
+  # BUGFIX: 'nunique' was passed to .Call below without ever being fetched
+  # from the cache (compare hashrev), causing "object 'nunique' not found"
+  # at run time (or silently picking up an unrelated global).
+  nunique <- get("nunique", envir=cache, inherits=FALSE)
+  ret <- logical(length(hashdat))
+  .Call(C_hashrin_integer64, as.integer64(x), hashdat, hashbits, hashmap, nunique, ret, PACKAGE = "bit64")
+  ret
+}
+
+
+# Logical vector marking duplicated elements of the hashed data
+# ('duplicated' semantics).
+hashdup <- function(cache, ...)UseMethod("hashdup")
+hashdup.cache_integer64 <- function(cache, ...){
+  bits <- get("hashbits", envir=cache, inherits=FALSE)
+  map  <- get("hashmap", envir=cache, inherits=FALSE)
+  dat  <- get("x", envir=cache, inherits=FALSE)
+  nuni <- get("nunique", envir=cache, inherits=FALSE)
+  dup <- logical(length(dat))
+  .Call(C_hashdup_integer64, dat, bits, map, nuni, dup, PACKAGE = "bit64")
+  dup
+}
+
+# Unique values of the hashed data; keep.order=TRUE preserves order of
+# first appearance, otherwise the (pseudo)random hash order is returned.
+hashuni <- function(cache, ...)UseMethod("hashuni")
+hashuni.cache_integer64 <- function(cache, keep.order=FALSE, ...){
+  bits <- get("hashbits", envir=cache, inherits=FALSE)
+  map  <- get("hashmap", envir=cache, inherits=FALSE)
+  dat  <- get("x", envir=cache, inherits=FALSE)
+  nuni <- get("nunique", envir=cache, inherits=FALSE)
+  uni <- double(nuni)	# integer64 payload lives in a double vector
+  .Call(C_hashuni_integer64, dat, bits, map, as.logical(keep.order), uni, PACKAGE = "bit64")
+  oldClass(uni) <- "integer64"
+  uni
+}
+
+# Positions of the unique values within the hashed data; ordering follows
+# the same keep.order convention as hashuni.
+hashupo <- function(cache, ...)UseMethod("hashupo")
+hashupo.cache_integer64 <- function(cache, keep.order=FALSE, ...){
+  bits <- get("hashbits", envir=cache, inherits=FALSE)
+  map  <- get("hashmap", envir=cache, inherits=FALSE)
+  dat  <- get("x", envir=cache, inherits=FALSE)
+  nuni <- get("nunique", envir=cache, inherits=FALSE)
+  pos <- integer(nuni)
+  .Call(C_hashupo_integer64, dat, bits, map, as.logical(keep.order), pos, PACKAGE = "bit64")
+  pos
+}
+
+# Tabulate the hashed data: returns list(values, counts) where the values
+# come in the (pseudo)random hash order, i.e. the order of
+# hashuni(, keep.order=FALSE) / hashupo(, keep.order=FALSE).
+hashtab <- function(cache, ...)UseMethod("hashtab")
+hashtab.cache_integer64 <- function(cache, ...){
+  bits <- get("hashbits", envir=cache, inherits=FALSE)
+  map  <- get("hashmap", envir=cache, inherits=FALSE)
+  dat  <- get("x", envir=cache, inherits=FALSE)
+  nuni <- get("nunique", envir=cache, inherits=FALSE)
+  tab <- .Call(C_hashtab_integer64, dat, bits, map, nuni, PACKAGE = "bit64")
+  attr(tab, "names") <- c("values","counts")
+  tab
+}
+
+# Tabulate 'x' building the hashmap on the fly (single C pass); returns
+# list(values, counts) in hash order, like hashtab after hashmap.
+hashmaptab <- function(x, ...)UseMethod("hashmaptab")
+hashmaptab.integer64 <- function(x, nunique=NULL, minfac=1.5, hashbits=NULL, ...){
+  if (is.null(nunique)){
+    nunique <- integer(1)	# output parameter, filled in by the C code
+    n <- length(x)
+  }else{
+    n <- nunique <- as.integer(nunique)
+  }
+  if (is.null(hashbits))
+    hashbits <- as.integer(ceiling(log2(n*minfac)))
+  else
+    hashbits <- as.integer(hashbits)
+  map <- integer(as.integer(2^hashbits))
+  # theoretically {map, nunique} could be reused here just as after hashmap_integer64
+  tab <- .Call(C_hashmaptab_integer64, x, hashbits, map, nunique, PACKAGE = "bit64")
+  attr(tab, "names") <- c("values","counts")
+  tab
+}
+
+# Unique values of 'x' in order of first appearance, building the hashmap
+# on the fly (single C pass).
+hashmapuni <- function(x, ...)UseMethod("hashmapuni")
+hashmapuni.integer64 <- function(x, nunique=NULL, minfac=1.5, hashbits=NULL, ...){
+  if (is.null(nunique)){
+    nunique <- integer(1)	# output parameter, filled in by the C code
+    n <- length(x)
+  }else{
+    n <- nunique <- as.integer(nunique)
+  }
+  if (is.null(hashbits))
+    hashbits <- as.integer(ceiling(log2(n*minfac)))
+  else
+    hashbits <- as.integer(hashbits)
+  map <- integer(as.integer(2^hashbits))
+  # theoretically {map, nunique} could be reused here just as after hashmap_integer64
+  uni <- .Call(C_hashmapuni_integer64, x, hashbits, map, nunique, PACKAGE = "bit64")
+  oldClass(uni) <- "integer64"
+  uni
+}
+
+# Positions of the unique values of 'x' in order of first appearance,
+# building the hashmap on the fly (single C pass).
+hashmapupo <- function(x, ...)UseMethod("hashmapupo")
+hashmapupo.integer64 <- function(x, nunique=NULL, minfac=1.5, hashbits=NULL, ...){
+  if (is.null(nunique)){
+    nunique <- integer(1)	# output parameter, filled in by the C code
+    n <- length(x)
+  }else{
+    n <- nunique <- as.integer(nunique)
+  }
+  if (is.null(hashbits))
+    hashbits <- as.integer(ceiling(log2(n*minfac)))
+  else
+    hashbits <- as.integer(hashbits)
+  map <- integer(as.integer(2^hashbits))
+  # theoretically {map, nunique} could be reused here just as after hashmap_integer64
+  .Call(C_hashmapupo_integer64, x, hashbits, map, nunique, PACKAGE = "bit64")
+}
+
+# Developer scratch code, deliberately wrapped in if (FALSE) so it never
+# executes: interactive timings comparing hashmaptab() against separate
+# hashmap()/hashuni()/hashtab() calls, and against a sort-based tie count.
+if (FALSE){
+  library(bit64)
+  n <- 1e7
+  x <- as.integer64(sample(n, n, TRUE))
+  # timing of the three-step approach: build hashmap, then unique, then table
+  t1 <- system.time({h <- hashmap(x)})[3]
+  t2 <- system.time({value <- hashuni(h)})[3]
+  t3 <- system.time({count <- hashtab(h)})[3]
+  t4 <- system.time({ret1 <- list(values=value, counts=count)})[3]
+  t1+t2+t3+t4
+  # versus the fused single-pass version
+  system.time({ret2 <- hashmaptab(x)})[3]
+  identical(ret1,ret2)
+
+
+  x <- as.integer64(sample(n, n, TRUE))
+  
+  # tie count via hash table ...
+  system.time({
+	ret2 <- hashmaptab(x)
+	cv2 <- sum(ret2$counts[ret2$counts>1])
+  })[3]
+
+  # ... versus tie count via sorting
+  system.time({
+	s <- clone(x)
+	na.count <- ramsort(s, has.na = TRUE, na.last = FALSE, decreasing = FALSE, stable = FALSE, optimize = "time")
+	cv <- .Call(C_r_ram_integer64_sortnut, x = s, PACKAGE = "bit64")[[2]]
+	})
+
+  cv
+  cv2
+
+	
+  nunique(x)
+  length(value)
+  length(count)
+  # NOTE(review): t1 is a timing scalar, so t1$value/t1$count below look
+  # stale from an earlier iteration of this scratch code — kept as-is (dead code).
+  length(t1$value)
+  length(t1$count)
+  value
+  t1
+  count
+ 
+  s <- clone(x); o <- seq_along(x); ramsortorder(s, o);
+  t2 <- sortordertab(s,o);
+  length(s)
+  length(t2)
+  
+ 
+}
diff --git a/R/highlevel64.R b/R/highlevel64.R
new file mode 100644
index 0000000..e9cc433
--- /dev/null
+++ b/R/highlevel64.R
@@ -0,0 +1,3048 @@
+# /*
+# R-Code for matching and other functions based on hashing
+# S3 atomic 64bit integers for R
+# (c) 2012 Jens Oehlschägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+# */
+
+#! \name{benchmark64}
+#! \alias{benchmark64}
+#! \alias{optimizer64}
+#! \title{
+#!  Function for measuring algorithmic performance \cr 
+#!  of high-level and low-level integer64 functions
+#! }
+#! \description{
+#!  \code{benchmark64} compares high-level integer64 functions against the integer functions from Base R \cr
+#!  \code{optimizer64} compares for each high-level integer64 function the Base R integer function with several low-level integer64 functions with and without caching \cr
+#! }
+#! \usage{
+#! benchmark64(nsmall = 2^16, nbig = 2^25, timefun = repeat.time
+#! )
+#! optimizer64(nsmall = 2^16, nbig = 2^25, timefun = repeat.time
+#! , what = c("match", "\%in\%", "duplicated", "unique", "unipos", "table", "rank", "quantile")
+#! , uniorder = c("original", "values", "any")
+#! , taborder = c("values", "counts")
+#! , plot = TRUE
+#! )
+#! }
+#! \arguments{
+#!   \item{nsmall}{ size of smaller vector }
+#!   \item{nbig}{ size of the bigger vector }
+#!   \item{timefun}{ a function for timing such as \code{\link[bit]{repeat.time}} or \code{\link{system.time}} }
+#!   \item{what}{
+#!  a vector of names of high-level functions
+#! }
+#!   \item{uniorder}{
+#!  one of the order parameters that are allowed in \code{\link{unique.integer64}} and \code{\link{unipos.integer64}}
+#! }
+#!   \item{taborder}{
+#!  one of the order parameters that are allowed in \code{\link{table.integer64}} 
+#! }
+#!   \item{plot}{
+#!  set to FALSE to suppress plotting 
+#! }
+#! }
+#! \details{
+#!  \code{benchmark64} compares the following scenarios for the following use cases: 
+#!  \tabular{rl}{
+#!   \bold{scenario name} \tab \bold{explanation} \cr
+#!   32-bit  \tab applying Base R function to 32-bit integer data \cr
+#!   64-bit \tab applying bit64 function to 64-bit integer data (with no cache) \cr
+#!   hashcache \tab ditto when cache contains \code{\link{hashmap}}, see \code{\link{hashcache}} \cr
+#!   sortordercache \tab ditto when cache contains sorting and ordering, see \code{\link{sortordercache}} \cr
+#!   ordercache \tab ditto when cache contains ordering only, see \code{\link{ordercache}} \cr
+#!   allcache \tab ditto when cache contains sorting, ordering and hashing \cr
+#!  }
+#!  \tabular{rl}{
+#!   \bold{use case name} \tab \bold{explanation} \cr
+#!   cache         \tab filling the cache according to scenario \cr
+#!   match(s,b)    \tab match small in big vector \cr
+#!   s \%in\% b      \tab small \%in\% big vector \cr
+#!   match(b,s)    \tab match big in small vector \cr
+#!   b \%in\% s      \tab big \%in\% small vector \cr
+#!   match(b,b)    \tab match big in (different) big vector \cr
+#!   b \%in\% b      \tab big \%in\% (different) big vector \cr
+#!   duplicated(b) \tab duplicated of big vector \cr
+#!   unique(b)     \tab unique of big vector \cr
+#!   table(b)      \tab table of big vector \cr
+#!   sort(b)       \tab sorting of big vector \cr
+#!   order(b)      \tab ordering of big vector \cr
+#!   rank(b)       \tab ranking of big vector \cr
+#!   quantile(b)   \tab quantiles of big vector \cr
+#!   summary(b)    \tab summary of big vector \cr
+#!   SESSION       \tab exemplary session involving multiple calls (including cache filling costs) \cr
+#!  }
+#!  Note that the timings for the cached variants do \emph{not} contain the time costs of building the cache, except for the timing of the exemplary user session, where the cache costs are included in order to evaluate amortization. 
+#! }
+#! \value{
+#!  \code{benchmark64} returns a matrix with elapsed seconds, different high-level tasks in rows and different scenarios to solve the task in columns. The last row named 'SESSION' contains the elapsed seconds of the exemplary session.
+#!  \cr
+#!  \code{optimizer64} returns a dimensioned list with one row for each high-level function timed and two columns named after the values of the \code{nsmall} and \code{nbig} sample sizes. Each list cell contains a matrix with timings, low-level-methods in rows and three measurements \code{c("prep","both","use")} in columns. If it can be measured separately, \code{prep} contains the timing of preparatory work such as sorting and hashing, and \code{use} contains the timing of using the pre [...]
+#! }
+#! \author{
+#!  Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \seealso{
+#!  \code{\link{integer64}}
+#! }
+#! \examples{
+#! message("this small example using system.time does not give serious timings\n
+#! this we do this only to run regression tests")
+#! benchmark64(nsmall=2^7, nbig=2^13, timefun=function(expr)system.time(expr, gcFirst=FALSE))
+#! optimizer64(nsmall=2^7, nbig=2^13, timefun=function(expr)system.time(expr, gcFirst=FALSE)
+#! , plot=FALSE
+#! )
+#!\dontrun{
+#! message("for real measurement of sufficiently large datasets run this on your machine")
+#! benchmark64()
+#! optimizer64()
+#!}
+#! message("let's look at the performance results on Core i7 Lenovo T410 with 8 GB RAM")
+#! data(benchmark64.data)
+#! print(benchmark64.data)
+#! 
+#! matplot(log2(benchmark64.data[-1,1]/benchmark64.data[-1,])
+#! , pch=c("3", "6", "h", "s", "o", "a") 
+#! , xlab="tasks [last=session]"
+#! , ylab="log2(relative speed) [bigger is better]"
+#! )
+#! matplot(t(log2(benchmark64.data[-1,1]/benchmark64.data[-1,]))
+#! , type="b", axes=FALSE 
+#! , lwd=c(rep(1, 14), 3)
+#! , xlab="context"
+#! , ylab="log2(relative speed) [bigger is better]"
+#! )
+#! axis(1
+#! , labels=c("32-bit", "64-bit", "hash", "sortorder", "order", "hash+sortorder")
+#! , at=1:6
+#! )
+#! axis(2)
+#! data(optimizer64.data)
+#! print(optimizer64.data)
+#! oldpar <- par(no.readonly = TRUE)
+#! par(mfrow=c(2,1))
+#! par(cex=0.7)
+#! for (i in 1:nrow(optimizer64.data)){
+#!  for (j in 1:2){
+#!    tim <- optimizer64.data[[i,j]]
+#!   barplot(t(tim))
+#!   if (rownames(optimizer64.data)[i]=="match")
+#!    title(paste("match", colnames(optimizer64.data)[j], "in", colnames(optimizer64.data)[3-j]))
+#!   else if (rownames(optimizer64.data)[i]=="\%in\%")
+#!    title(paste(colnames(optimizer64.data)[j], "\%in\%", colnames(optimizer64.data)[3-j]))
+#!   else
+#!    title(paste(rownames(optimizer64.data)[i], colnames(optimizer64.data)[j]))
+#!  }
+#! }
+#! par(mfrow=c(1,1))
+#!}
+#! \keyword{ misc }
+
+#! \name{benchmark64.data}
+#! \alias{benchmark64.data}
+#! \docType{data}
+#! \title{
+#!  Results of performance measurement on a Core i7 Lenovo T410 8 GB RAM under Windows 7 64bit
+#! }
+#! \description{
+#!   These are the results of calling \code{\link{benchmark64}}
+#! }
+#! \usage{data(benchmark64.data)}
+#! \format{
+#!   The format is:
+#!  num [1:16, 1:6] 2.55e-05 2.37 2.39 1.28 1.39 ...
+#!  - attr(*, "dimnames")=List of 2
+#!   ..$ : chr [1:16] "cache" "match(s,b)" "s \%in\% b" "match(b,s)" ...
+#!   ..$ : chr [1:6] "32-bit" "64-bit" "hashcache" "sortordercache" ...
+#! }
+#! \examples{
+#! data(benchmark64.data)
+#! print(benchmark64.data)
+#! matplot(log2(benchmark64.data[-1,1]/benchmark64.data[-1,])
+#! , pch=c("3", "6", "h", "s", "o", "a")
+#! , xlab="tasks [last=session]"
+#! , ylab="log2(relative speed) [bigger is better]"
+#! )
+#! matplot(t(log2(benchmark64.data[-1,1]/benchmark64.data[-1,]))
+#! , axes=FALSE
+#! , type="b"
+#! , lwd=c(rep(1, 14), 3)
+#! , xlab="context"
+#! , ylab="log2(relative speed) [bigger is better]"
+#! )
+#! axis(1
+#! , labels=c("32-bit", "64-bit", "hash", "sortorder", "order", "hash+sortorder")
+#! , at=1:6
+#! )
+#! axis(2)
+#! }
+#! \keyword{datasets}
+
+
+#! \name{optimizer64.data}
+#! \alias{optimizer64.data}
+#! \docType{data}
+#! \title{
+#!  Results of performance measurement on a Core i7 Lenovo T410 8 GB RAM under Windows 7 64bit
+#! }
+#! \description{
+#!   These are the results of calling \code{\link{optimizer64}}
+#! }
+#! \usage{data(optimizer64.data)}
+#! \format{
+#!   The format is:
+#! List of 16
+#!  $ : num [1:9, 1:3] 0 0 1.63 0.00114 2.44 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:9] "match" "match.64" "hashpos" "hashrev" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:10, 1:3] 0 0 0 1.62 0.00114 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:10] "\%in\%" "match.64" "\%in\%.64" "hashfin" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:10, 1:3] 0 0 0.00105 0.00313 0.00313 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:10] "duplicated" "duplicated.64" "hashdup" "sortorderdup1" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:15, 1:3] 0 0 0 0.00104 0.00104 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:15] "unique" "unique.64" "hashmapuni" "hashuni" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:14, 1:3] 0 0 0 0.000992 0.000992 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:14] "unique" "unipos.64" "hashmapupo" "hashupo" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:13, 1:3] 0 0 0 0 0.000419 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:13] "tabulate" "table" "table.64" "hashmaptab" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:7, 1:3] 0 0 0 0.00236 0.00714 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:7] "rank" "rank.keep" "rank.64" "sortorderrnk" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:6, 1:3] 0 0 0.00189 0.00714 0 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:6] "quantile" "quantile.64" "sortqtl" "orderqtl" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:9, 1:3] 0 0 0.00105 1.17 0 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:9] "match" "match.64" "hashpos" "hashrev" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:10, 1:3] 0 0 0 0.00104 1.18 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:10] "\%in\%" "match.64" "\%in\%.64" "hashfin" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:10, 1:3] 0 0 1.64 2.48 2.48 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:10] "duplicated" "duplicated.64" "hashdup" "sortorderdup1" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:15, 1:3] 0 0 0 1.64 1.64 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:15] "unique" "unique.64" "hashmapuni" "hashuni" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:14, 1:3] 0 0 0 1.62 1.62 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:14] "unique" "unipos.64" "hashmapupo" "hashupo" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:13, 1:3] 0 0 0 0 0.32 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:13] "tabulate" "table" "table.64" "hashmaptab" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:7, 1:3] 0 0 0 2.96 10.69 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:7] "rank" "rank.keep" "rank.64" "sortorderrnk" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  $ : num [1:6, 1:3] 0 0 1.62 10.61 0 ...
+#!   ..- attr(*, "dimnames")=List of 2
+#!   .. ..$ : chr [1:6] "quantile" "quantile.64" "sortqtl" "orderqtl" ...
+#!   .. ..$ : chr [1:3] "prep" "both" "use"
+#!  - attr(*, "dim")= int [1:2] 8 2
+#!  - attr(*, "dimnames")=List of 2
+#!   ..$ : chr [1:8] "match" "\%in\%" "duplicated" "unique" ...
+#!   ..$ : chr [1:2] "65536" "33554432"
+#! }
+#! \examples{
+#! data(optimizer64.data)
+#! print(optimizer64.data)
+#! oldpar <- par(no.readonly = TRUE)
+#! par(mfrow=c(2,1))
+#! par(cex=0.7)
+#! for (i in 1:nrow(optimizer64.data)){
+#!  for (j in 1:2){
+#!    tim <- optimizer64.data[[i,j]]
+#!   barplot(t(tim))
+#!   if (rownames(optimizer64.data)[i]=="match")
+#!    title(paste("match", colnames(optimizer64.data)[j], "in", colnames(optimizer64.data)[3-j]))
+#!   else if (rownames(optimizer64.data)[i]=="\%in\%")
+#!    title(paste(colnames(optimizer64.data)[j], "\%in\%", colnames(optimizer64.data)[3-j]))
+#!   else
+#!    title(paste(rownames(optimizer64.data)[i], colnames(optimizer64.data)[j]))
+#!  }
+#! }
+#! par(mfrow=c(1,1))
+#! }
+#! \keyword{datasets}
+
+
+# benchmark64: measure elapsed time of typical high-level operations on
+# 32-bit integer data (Base R) versus 64-bit integer64 data under several
+# caching scenarios (no cache, hashcache, sortordercache, ordercache, all).
+# nsmall  size of the smaller ("dimension") vector
+# nbig    size of the bigger ("fact") vectors
+# timefun timing function, e.g. repeat.time or system.time
+# returns matrix of elapsed seconds: tasks in rows (last row 'SESSION'),
+#         the six scenarios in columns
+benchmark64 <- function(nsmall=2^16, nbig=2^25, timefun=repeat.time)
+{
+ 
+ message('\ncompare performance for a complete sessions of calls')
+ s <- sample(nbig, nsmall, TRUE)
+ b <- sample(nbig, nbig, TRUE)
+ b2 <- sample(nbig, nbig, TRUE)
+ 
+ tim1 <- double(6)
+ names(tim1) <- c("32-bit","64-bit","hashcache","sortordercache","ordercache","allcache")
+
+ s <- as.integer(s)
+ b <- as.integer(b)
+ b2 <- as.integer(b2)
+ 
+ i <- 1
+ # scenario loop: 1 = Base R 32-bit, 2 = integer64 without cache,
+ # 3:6 = integer64 with the cache named in names(tim1)[i]
+ for (i in 1:6){
+  message("\n=== ", names(tim1)[i], " ===")
+  
+  if (i==2){
+   s <- as.integer64(s)
+   b <- as.integer64(b)
+   b2 <- as.integer64(b2)
+  }
+
+  tim1[i] <- 0
+ 
+  # the cost of filling the cache is charged to the session total
+  tim1[i] <- tim1[i] + timefun({
+   switch(as.character(i)
+   , "3" = {hashcache(s); hashcache(b); hashcache(b2)}
+   , "4" = {sortordercache(s); sortordercache(b); sortordercache(b2)}
+   , "5" = {ordercache(s); ordercache(b); ordercache(b2)}
+   , "6" = {hashcache(s); hashcache(b); hashcache(b2);sortordercache(s); sortordercache(b); sortordercache(b2)}
+   )
+  })[3]
+ 
+  message('check data range, mean etc.')
+  tim1[i] <- tim1[i] + timefun({
+   summary(b)
+  })[3]
+  message('get all percentiles for plotting distribution shape')
+  tim1[i] <- tim1[i] + timefun({
+   quantile(b, probs=seq(0, 1, 0.01))
+  })[3]
+  message('list the upper and lower permille of values')
+  tim1[i] <- tim1[i] + timefun({
+   quantile(b, probs=c(0.001, 0.999))
+   sort(b, na.last=NA)
+  })[3]
+  message('OK, for some of these values I want to see the complete ROW, so I need their positions in the data.frame')
+  tim1[i] <- tim1[i] + timefun({
+   if(i==1)order(b) else order.integer64(b)
+  })[3]
+  message('check if any values are duplicated')
+  tim1[i] <- tim1[i] + timefun({
+   any(duplicated(b))
+  })[3]
+  message('since not unique, then check distribution of frequencies')
+  tim1[i] <- tim1[i] + timefun({
+   if(i==1)tabulate(table(b, exclude=NULL)) else tabulate(table.integer64(b, return='list')$counts)
+  })[3]
+  message("OK, let's plot the percentiles of unique values versus the percentiles allowing for duplicates")
+  tim1[i] <- tim1[i] + timefun({
+   quantile(b, probs=seq(0, 1, 0.01))
+   quantile(unique(b), probs=seq(0, 1, 0.01))
+  })[3]
+  message('check whether we find a match for each fact in the dimension table')
+  tim1[i] <- tim1[i] + timefun({
+   all(if(i==1) b %in% s else "%in%.integer64"(b, s))
+  })[3]
+  message('check whether there are any dimension table entries not in the fact table')
+  tim1[i] <- tim1[i] + timefun({
+   all(if(i==1) s %in% b else "%in%.integer64"(s, b))
+  })[3]
+  message('check whether we find a match for each fact in a parallel fact table')
+  tim1[i] <- tim1[i] + timefun({
+   all(if(i==1) b %in% b2 else "%in%.integer64"(b, b2))
+  })[3]
+  message('find positions of facts in dimension table for joining')
+  tim1[i] <- tim1[i] + timefun({
+   if(i==1) match(b, s) else match.integer64(b, s)
+  })[3]
+  message('find positions of facts in parallel fact table for joining')
+  tim1[i] <- tim1[i] + timefun({
+   if(i==1) match(b, b2) else match.integer64(b, b2)
+  })[3]
+  message('out of curiosity: how well rank-correlated are fact and parallel fact table?')
+  tim1[i] <- tim1[i] + timefun({
+   if (i==1){
+    cor(rank(b, na.last="keep"), rank(b2, na.last="keep"), use="na.or.complete")
+   }else{
+    cor(rank.integer64(b), rank.integer64(b2), use="na.or.complete")
+   }
+  })[3]
+  
+  remcache(s)
+  remcache(b)
+  remcache(b2)
+  
+  print(round(rbind(seconds=tim1, factor=tim1[1]/tim1), 3))
+ 
+ }
+
+        # 32-bit         64-bit      hashcache sortordercache     ordercache       allcache 
+       # 196.510          8.963          8.242          5.183         12.325          6.043 
+        # 32-bit         64-bit      hashcache sortordercache     ordercache       allcache 
+         # 1.000         21.924         23.842         37.913         15.944         32.519 
+
+   
+ # second pass: time each component task separately per scenario
+ # (here the cache-building cost is recorded in its own row 'cache')
+ message("\nnow let's look more systematically at the components involved")
+ s <- sample(nbig, nsmall, TRUE)
+ b <- sample(nbig, nbig, TRUE)
+ b2 <- sample(nbig, nbig, TRUE)
+ 
+ tim2 <- matrix(0, 15, 6)
+ dimnames(tim2) <- list(c("cache", "match(s,b)", "s %in% b", "match(b,s)", "b %in% s", "match(b,b)", "b %in% b", "duplicated(b)", "unique(b)", "table(b)", "sort(b)", "order(b)", "rank(b)", "quantile(b)", "summary(b)")
+ , c("32-bit","64-bit","hashcache","sortordercache","ordercache","allcache"))
+
+ s <- as.integer(s)
+ b <- as.integer(b)
+ b2 <- as.integer(b2)
+ 
+ i <- 1
+ for (i in 1:6){
+  if (i==2){
+   s <- as.integer64(s)
+   b <- as.integer64(b)
+   b2 <- as.integer64(b2)
+  }
+ 
+  if (i>2)message(colnames(tim2)[i], " cache")
+  tim2["cache",i] <- timefun({
+   switch(as.character(i)
+   , "3" = {hashcache(s); hashcache(b); hashcache(b2)}
+   , "4" = {sortordercache(s); sortordercache(b); sortordercache(b2)}
+   , "5" = {ordercache(s); ordercache(b); ordercache(b2)}
+   , "6" = {hashcache(s); hashcache(b); hashcache(b2);sortordercache(s); sortordercache(b); sortordercache(b2)}
+   )
+  })[3]
+ 
+  message(colnames(tim2)[i], " match(s,b)")
+  tim2["match(s,b)",i] <- timefun({
+   if (i==1) match(s, b) else match.integer64(s, b)
+  })[3]
+ 
+  message(colnames(tim2)[i], " s %in% b")
+  tim2["s %in% b",i] <- timefun({
+   if (i==1) s %in% b else "%in%.integer64"(s,b)
+  })[3]
+ 
+  message(colnames(tim2)[i], " match(b,s)")
+  tim2["match(b,s)",i] <- timefun({
+   if (i==1) match(b, s) else match.integer64(b, s)
+  })[3]
+ 
+  message(colnames(tim2)[i], " b %in% s")
+  tim2["b %in% s",i] <- timefun({
+   if (i==1) b %in% s else "%in%.integer64"(b,s)
+  })[3]
+ 
+  message(colnames(tim2)[i], " match(b,b)")
+  tim2["match(b,b)",i] <- timefun({
+   if (i==1) match(b, b2) else match.integer64(b, b2)
+  })[3]
+ 
+  message(colnames(tim2)[i], " b %in% b")
+  tim2["b %in% b",i] <- timefun({
+   if (i==1) b %in% b2 else "%in%.integer64"(b,b2)
+  })[3]
+ 
+  message(colnames(tim2)[i], " duplicated(b)")
+  tim2["duplicated(b)",i] <- timefun({
+   duplicated(b)
+  })[3]
+ 
+  message(colnames(tim2)[i], " unique(b)")
+  tim2["unique(b)",i] <- timefun({
+   unique(b)
+  })[3]
+ 
+  message(colnames(tim2)[i], " table(b)")
+  tim2["table(b)",i] <- timefun({
+   if(i==1) table(b) else table.integer64(b, return='list')
+  })[3]
+ 
+  message(colnames(tim2)[i], " sort(b)")
+  tim2["sort(b)",i] <- timefun({
+   sort(b)
+  })[3]
+ 
+  message(colnames(tim2)[i], " order(b)")
+  tim2["order(b)",i] <- timefun({
+   if(i==1) order(b) else order.integer64(b)
+  })[3]
+ 
+  message(colnames(tim2)[i], " rank(b)")
+  tim2["rank(b)",i] <- timefun({
+   if(i==1) rank(b) else rank.integer64(b)
+  })[3]
+ 
+  message(colnames(tim2)[i], " quantile(b)")
+  tim2["quantile(b)",i] <- timefun({
+   quantile(b)
+  })[3]
+ 
+  message(colnames(tim2)[i], " summary(b)")
+  tim2["summary(b)",i] <- timefun({
+   summary(b)
+  })[3]
+  
+  remcache(s)
+  remcache(b)
+  remcache(b2)
+  
+  # combine component timings with the session timings from the first pass
+  tim3 <- rbind(tim2, SESSION=tim1)
+  #tim2 <- tim2[,1]/tim2
+  
+  cat("seconds")
+  print(round(tim3, 3))
+  cat("factor")
+  print(round(tim3[,1]/tim3, 3))
+ 
+ }
+
+
+ 
+               # 32-bit 64-bit hashcache sortordercache ordercache allcache
+# cache           0.000  0.000     0.775          1.330      6.500    2.660
+# match(s,b)      0.820  0.218     0.004          0.025      0.093    0.004
+# s %in% b        0.810  0.234     0.003          0.022      0.093    0.003
+# match(b,s)      0.450  0.228     0.232          0.224      0.224    0.226
+# b %in% s        0.510  0.226     0.224          0.222      0.218    0.222
+# match(b,b)      2.370  0.870     0.505          0.890      0.880    0.505
+# b %in% b        2.350  0.850     0.480          0.865      0.870    0.483
+# duplicated(b)   0.875  0.510     0.141          0.116      0.383    0.117
+# unique(b)       0.930  0.555     0.447          0.156      0.427    0.450
+# table(b)      110.340  0.725     0.680          0.234      0.575    0.202
+# sort(b)         2.440  0.400     0.433          0.072      0.460    0.069
+# order(b)       12.780  0.680     0.615          0.036      0.036    0.035
+# rank(b)        13.480  0.860     0.915          0.240      0.545    0.246
+# quantile(b)     0.373  0.400     0.410          0.000      0.000    0.000
+# summary(b)      0.645  0.423     0.427          0.016      0.016    0.016
+# TOTAL         149.173  7.179     6.291          4.448     11.320    5.239
+              # 32-bit  64-bit hashcache sortordercache ordercache allcache
+# cache              1   1.062     0.000          0.000      0.000    0.000
+# match(s,b)         1   3.761   230.420         32.475      8.843  217.300
+# s %in% b           1   3.462   234.090         36.450      8.735  237.386
+# match(b,s)         1   1.974     1.940          2.009      2.009    1.991
+# b %in% s           1   2.257     2.277          2.297      2.339    2.297
+# match(b,b)         1   2.724     4.693          2.663      2.693    4.693
+# b %in% b           1   2.765     4.896          2.717      2.701    4.862
+# duplicated(b)      1   1.716     6.195          7.572      2.283    7.500
+# unique(b)          1   1.676     2.082          5.972      2.180    2.067
+# table(b)           1 152.193   162.265        471.538    191.896  546.238
+# sort(b)            1   6.100     5.631         33.822      5.304   35.534
+# order(b)           1  18.794    20.780        357.840    354.297  366.950
+# rank(b)            1  15.674    14.732         56.167     24.734   54.797
+# quantile(b)        1   0.933     0.911        804.907    806.027  810.133
+# summary(b)         1   1.524     1.512         39.345     39.345   39.345
+# TOTAL              1  20.778    23.712         33.534     13.177   28.476 
+
+  tim3
+}
+
+
+optimizer64 <- function(nsmall=2^16, nbig=2^25, timefun=repeat.time
+, what=c("match","%in%","duplicated","unique","unipos","table","rank","quantile")
+, uniorder = c("original", "values", "any")
+, taborder = c("values", "counts")
+, plot = TRUE
+)
+{
+ uniorder <- match.arg(uniorder)
+ taborder <- match.arg(taborder)
+ ret <- vector("list", 2*length(what))
+ dim(ret) <- c(length(what), 2L)
+ dimnames(ret) <- list(what, c(nsmall, nbig))
+ 
+ if (plot){
+  oldpar <- par(no.readonly = TRUE)
+  on.exit(par(oldpar))
+  par(mfrow=c(2,1))
+ }
+ 
+ if ("match" %in% what){
+  message("match: timings of different methods")
+  N1 <- c(nsmall, nbig)
+  N2 <- c(nbig, nsmall)
+  for (i in seq_along(N1)){
+   n1 <- N1[i]
+   n2 <- N2[i]
+   x1 <- c(sample(n2, n1-1, TRUE), NA)
+   x2 <- c(sample(n2, n2-1, TRUE), NA)
+   tim <- matrix(0, 9, 3)
+   dimnames(tim) <- list(c("match","match.64","hashpos","hashrev","sortorderpos","orderpos","hashcache","sortorder.cache","order.cache"), c("prep","both","use"))
+
+   tim["match","both"] <- timefun({
+    p <- match(x1, x2)
+   })[3]
+   x1 <- as.integer64(x1)
+   x2 <- as.integer64(x2)
+
+   tim["match.64","both"] <- timefun({
+    p2 <- match.integer64(x1, x2)
+   })[3]
+   stopifnot(identical(p2, p))
+
+   tim["hashpos","prep"] <- timefun({
+    h2 <- hashmap(x2)
+   })[3]
+   tim["hashpos","use"] <- timefun({
+    p2 <- hashpos(h2, x1)
+   })[3]
+   stopifnot(identical(p2, p))
+   
+   tim["hashrev","prep"] <- timefun({
+    h1 <- hashmap(x1)
+   })[3]
+   tim["hashrev","use"] <- timefun({
+    p1 <- hashrev(h1, x2)
+   })[3]
+   stopifnot(identical(p2, p))
+   
+   tim["sortorderpos","prep"] <- system.time({
+    s2 <- clone(x2)
+    o2 <- seq_along(x2)
+    ramsortorder(s2, o2, na.last=FALSE)
+   })[3]
+   tim["sortorderpos","use"] <- timefun({
+    p2 <- sortorderpos(s2, o2, x1)
+   })[3]
+   stopifnot(identical(p2, p))
+   
+   tim["orderpos","prep"] <- timefun({
+    o2 <- seq_along(x2)
+    ramorder(x2, o2, na.last=FALSE)
+   })[3]
+   tim["orderpos","use"] <- timefun({
+    p2 <- orderpos(x2, o2, x1, method=2)
+   })[3]
+   stopifnot(identical(p2, p))
+   
+   hashcache(x2)
+   tim["hashcache","use"] <- timefun({
+    p2 <- match.integer64(x1, x2)
+   })[3]
+   stopifnot(identical(p2, p))
+   remcache(x2)
+   
+   sortordercache(x2)
+   tim["sortorder.cache","use"] <- timefun({
+    p2 <- match.integer64(x1, x2)
+   })[3]
+   stopifnot(identical(p2, p))
+   remcache(x2)
+   
+   ordercache(x2)
+   tim["order.cache","use"] <- timefun({
+    p2 <- match.integer64(x1, x2)
+   })[3]
+   stopifnot(identical(p2, p))
+   remcache(x2)
+
+   if (plot){
+    barplot(t(tim))
+    n <- format(c(n1, n2))
+    title(paste("match", n[1], "in", n[2]))
+   }
+   
+   ret[["match",as.character(n1)]] <- tim
+  }
+ }
+
+ if ("%in%" %in% what){
+  message("%in%: timings of different methods")
+  N1 <- c(nsmall, nbig)
+  N2 <- c(nbig, nsmall)
+  for (i in seq_along(N1)){
+   n1 <- N1[i]
+   n2 <- N2[i]
+   x1 <- c(sample(n2, n1-1, TRUE), NA)
+   x2 <- c(sample(n2, n2-1, TRUE), NA)
+   tim <- matrix(0, 10, 3)
+   dimnames(tim) <- list(c("%in%","match.64","%in%.64","hashfin","hashrin","sortfin","orderfin","hash.cache","sortorder.cache","order.cache"), c("prep","both","use"))
+
+   tim["%in%","both"] <- timefun({
+    p <- x1 %in% x2
+   })[3]
+   x1 <- as.integer64(x1)
+   x2 <- as.integer64(x2)
+
+   tim["match.64","both"] <- timefun({
+    p2 <- match.integer64(x1,x2, nomatch = 0L) > 0L
+   })[3]
+   stopifnot(identical(p2, p))
+
+   tim["%in%.64","both"] <- timefun({
+    p2 <- "%in%.integer64"(x1,x2) # this is using the custom version
+   })[3]
+   stopifnot(identical(p2, p))
+
+   tim["hashfin","prep"] <- timefun({
+    h2 <- hashmap(x2)
+   })[3]
+   tim["hashfin","use"] <- timefun({
+    p2 <- hashfin(h2, x1)
+   })[3]
+   stopifnot(identical(p2, p))
+   
+   tim["hashrin","prep"] <- timefun({
+    h1 <- hashmap(x1)
+   })[3]
+   tim["hashrin","use"] <- timefun({
+    p1 <- hashrin(h1, x2)
+   })[3]
+   stopifnot(identical(p2, p))
+   
+   tim["sortfin","prep"] <- timefun({
+    s2 <- clone(x2)
+    ramsort(s2, na.last=FALSE)
+   })[3]
+   tim["sortfin","use"] <- timefun({
+    p2 <- sortfin(s2, x1)
+   })[3]
+   stopifnot(identical(p2, p))
+   
+   tim["orderfin","prep"] <- timefun({
+    o2 <- seq_along(x2)
+    ramorder(x2, o2, na.last=FALSE)
+   })[3]
+   tim["orderfin","use"] <- timefun({
+    p2 <- orderfin(x2, o2, x1)
+   })[3]
+   stopifnot(identical(p2, p))
+   
+   hashcache(x2)
+   tim["hash.cache","use"] <- timefun({
+    p2 <- "%in%.integer64"(x1, x2)
+   })[3]
+   stopifnot(identical(p2, p))
+   remcache(x2)
+   
+   sortordercache(x2)
+   tim["sortorder.cache","use"] <- timefun({
+    p2 <- "%in%.integer64"(x1, x2)
+   })[3]
+   stopifnot(identical(p2, p))
+   remcache(x2)
+   
+   ordercache(x2)
+   tim["order.cache","use"] <- timefun({
+    p2 <- "%in%.integer64"(x1, x2)
+   })[3]
+   stopifnot(identical(p2, p))
+   remcache(x2)
+   
+   if (plot){
+    barplot(t(tim))
+    n <- format(c(n1, n2))
+    title(paste(n[1], "%in%", n[2]))
+   }
+    
+   ret[["%in%",as.character(n1)]] <- tim
+  }
+ }
+ if ("duplicated" %in% what){
+  message("duplicated: timings of different methods")
+  N <- c(nsmall, nbig)
+  for (i in seq_along(N)){
+   n <- N[i]
+   x <- c(sample(n, n-1, TRUE), NA)
+   tim <- matrix(0, 10, 3)
+   dimnames(tim) <- list(c("duplicated","duplicated.64","hashdup","sortorderdup1","sortorderdup2","orderdup1","orderdup2"
+    ,"hash.cache","sortorder.cache","order.cache")
+   , c("prep","both","use"))
+
+   tim["duplicated","both"] <- timefun({
+    p <- duplicated(x)
+   })[3]
+   x <- as.integer64(x)
+
+   tim["duplicated.64","both"] <- timefun({
+    p2 <- duplicated(x)
+   })[3]
+   stopifnot(identical(p2, p))
+
+   tim["hashdup","prep"] <- timefun({
+    h <- hashmap(x)
+   })[3]
+   tim["hashdup","use"] <- timefun({
+    p2 <- hashdup(h)
+   })[3]
+   stopifnot(identical(p2, p))
+   
+   tim["sortorderdup1","prep"] <- timefun({
+    s <- clone(x)
+    o <- seq_along(x)
+    ramsortorder(s, o, na.last=FALSE)
+    nunique <- sortnut(s)[1]
+   })[3]
+   tim["sortorderdup1","use"] <- timefun({
+    p2 <- sortorderdup(s, o, method=1)
+   })[3]
+   stopifnot(identical(p2, p))
+    
+   tim["sortorderdup2","prep"] <- tim["sortorderdup1","prep"]
+   tim["sortorderdup2","use"] <- timefun({
+    p2 <- sortorderdup(s, o, method=2)
+   })[3]
+   stopifnot(identical(p2, p))
+    
+   tim["orderdup1","prep"] <- timefun({
+    o <- seq_along(x)
+    ramorder(x, o, na.last=FALSE)
+    nunique <- ordernut(x,o)[1]
+   })[3]
+   tim["orderdup1","use"] <- timefun({
+    p2 <- orderdup(x, o, method=1)
+   })[3]
+   stopifnot(identical(p2, p))
+   
+   tim["orderdup2","prep"] <- tim["orderdup1","prep"]
+   tim["orderdup2","use"] <- timefun({
+    p2 <- orderdup(x, o, method=2)
+   })[3]
+   stopifnot(identical(p2, p))
+   
+   hashcache(x)
+   tim["hash.cache","use"] <- timefun({
+    p2 <- duplicated(x)
+   })[3]
+   stopifnot(identical(p2, p))
+   remcache(x)
+
+   sortordercache(x)
+   tim["sortorder.cache","use"] <- timefun({
+    p2 <- duplicated(x)
+   })[3]
+   stopifnot(identical(p2, p))
+   remcache(x)
+
+   ordercache(x)
+   tim["order.cache","use"] <- timefun({
+    p2 <- duplicated(x)
+   })[3]
+   stopifnot(identical(p2, p))
+   remcache(x)
+   
+   if (plot){
+    barplot(t(tim), cex.names=0.7)
+    title(paste("duplicated(",n,")", sep=""))
+   }
+   
+   ret[["duplicated",as.character(n)]] <- tim
+  }
+ }
+ if ("unique" %in% what){
+  message("unique: timings of different methods")
+  N <- c(nsmall, nbig)
+  for (i in seq_along(N)){
+   n <- N[i]
+   x <- c(sample(n, n-1, TRUE), NA)
+   tim <- matrix(0, 15, 3)
+   dimnames(tim) <- list(
+   c("unique","unique.64","hashmapuni","hashuni","hashunikeep","sortuni","sortunikeep","orderuni","orderunikeep","hashdup","sortorderdup"
+    ,"hash.cache","sort.cache","sortorder.cache","order.cache")
+   , c("prep","both","use"))
+
+   tim["unique","both"] <- timefun({
+    p <- unique(x)
+   })[3]
+   x <- as.integer64(x)
+   p <- as.integer64(p)
+   if (uniorder=="values")
+    ramsort(p, na.last=FALSE)
+
+   tim["unique.64","both"] <- timefun({
+    p2 <- unique(x, order=uniorder)
+   })[3]
+   if (uniorder!="any")
+    stopifnot(identical.integer64(p2, p))
+
+   tim["hashmapuni","both"] <- timefun({
+    p2 <- hashmapuni(x)
+   })[3]
+   if (uniorder=="original")
+    stopifnot(identical.integer64(p2, p))
+   
+   tim["hashuni","prep"] <- timefun({
+    h <- hashmap(x)
+    # for(r in 1:r)h <- hashmap(x, nunique=h$nunique)
+   })[3]
+   tim["hashuni","use"] <- timefun({
+    p2 <- hashuni(h)
+   })[3]
+   if (uniorder=="values")
+    stopifnot(identical.integer64(sort(p2, na.last=FALSE), p))
+   
+   tim["hashunikeep","prep"] <- tim["hashuni","prep"] 
+   tim["hashunikeep","use"] <- timefun({
+    p2 <- hashuni(h, keep.order=TRUE)
+   })[3]
+   if (uniorder=="original")
+    stopifnot(identical.integer64(p2, p))
+
+   tim["sortuni","prep"] <- timefun({
+    s <- clone(x)
+    ramsort(s, na.last=FALSE)
+    nunique <- sortnut(s)[1]
+   })[3]
+   tim["sortuni","use"] <- timefun({
+    p2 <- sortuni(s, nunique)
+   })[3]
+   if (uniorder=="values")
+    stopifnot(identical.integer64(sort(p2, na.last=FALSE), p))
+   
+   tim["sortunikeep","prep"] <- timefun({
+    s <- clone(x)
+    o <- seq_along(x)
+    ramsortorder(s, o, na.last=FALSE)
+    nunique <- sortnut(s)[1]
+   })[3]
+   tim["sortunikeep","use"] <- timefun({
+    p2 <- sortorderuni(x, s, o, nunique)
+   })[3]
+   if (uniorder=="original")
+    stopifnot(identical.integer64(p2, p))
+    
+   tim["orderuni","prep"] <- timefun({
+    o <- seq_along(x)
+    ramorder(x, o, na.last=FALSE)
+    nunique <- ordernut(x,o)[1]
+   })[3]
+   tim["orderuni","use"] <- timefun({
+    p2 <- orderuni(x, o, nunique)
+   })[3]
+   if (uniorder=="values")
+    stopifnot(identical.integer64(sort(p2, na.last=FALSE), p))
+   
+   tim["orderunikeep","prep"] <- tim["orderuni","prep"]
+   tim["orderunikeep","use"] <- timefun({
+    p2 <- orderuni(x, o, nunique, keep.order=TRUE)
+    nunique <- ordernut(x,o)[1]
+   })[3]
+   if (uniorder=="original")
+    stopifnot(identical.integer64(p2, p))
+
+   tim["hashdup","prep"] <- tim["hashuni","prep"]
+   tim["hashdup","use"] <- timefun({
+    p2 <- x[!hashdup(h)]
+   })[3]
+   if (uniorder=="original")
+    stopifnot(identical.integer64(p2, p))
+
+   tim["sortorderdup","prep"] <- tim["sortunikeep","prep"]
+   tim["sortorderdup","use"] <- timefun({
+    p2 <- x[!sortorderdup(s, o)]
+   })[3]
+   if (uniorder=="original")
+    stopifnot(identical.integer64(p2, p))
+
+   
+   hashcache(x)
+   tim["hash.cache","use"] <- timefun({
+    p2 <- unique(x, order=uniorder)
+   })[3]
+   if (uniorder!="any")
+    stopifnot(identical.integer64(p2, p))
+   remcache(x)
+
+   sortcache(x)
+   tim["sort.cache","use"] <- timefun({
+    p2 <- unique(x, order=uniorder)
+   })[3]
+   if (uniorder!="any")
+    stopifnot(identical.integer64(p2, p))
+   remcache(x)
+
+   sortordercache(x)
+   tim["sortorder.cache","use"] <- timefun({
+    p2 <- unique(x, order=uniorder)
+   })[3]
+   if (uniorder!="any")
+    stopifnot(identical.integer64(p2, p))
+   remcache(x)
+
+   ordercache(x)
+   tim["order.cache","use"] <- timefun({
+    p2 <- unique(x, order=uniorder)
+   })[3]
+   if (uniorder!="any")
+    stopifnot(identical.integer64(p2, p))
+   remcache(x)
+   
+   if (plot){
+    barplot(t(tim), cex.names=0.7)
+    title(paste("unique(",n,", order=",uniorder,")", sep=""))
+   }
+   
+   ret[["unique",as.character(n)]] <- tim
+  }
+ }
+ if ("unipos" %in% what){
+  message("unipos: timings of different methods")
+  N <- c(nsmall, nbig)
+  for (i in seq_along(N)){
+   n <- N[i]
+   x <- c(sample(n, n-1, TRUE), NA)
+   tim <- matrix(0, 14, 3)
+   dimnames(tim) <- list(
+   c("unique","unipos.64","hashmapupo","hashupo","hashupokeep","sortorderupo","sortorderupokeep","orderupo","orderupokeep","hashdup","sortorderdup"
+    ,"hash.cache","sortorder.cache","order.cache")
+   , c("prep","both","use"))
+
+   tim["unique","both"] <- timefun({
+    unique(x)
+   })[3]
+   x <- as.integer64(x)
+
+   tim["unipos.64","both"] <- timefun({
+    p <- unipos(x, order=uniorder)
+   })[3]
+
+   tim["hashmapupo","both"] <- timefun({
+    p2 <- hashmapupo(x)
+   })[3]
+   if (uniorder=="original")
+    stopifnot(identical(p2, p))
+   
+   tim["hashupo","prep"] <- timefun({
+    h <- hashmap(x)
+    # if nunique is small we could re-build the hashmap at a smaller size
+    # h <- hashmap(x, nunique=h$nunique)
+   })[3]
+   tim["hashupo","use"] <- timefun({
+    p2 <- hashupo(h)
+   })[3]
+   if (uniorder=="values")
+    stopifnot(identical(sort(p2, na.last=FALSE), sort(p, na.last=FALSE)))
+   
+   tim["hashupokeep","prep"] <- tim["hashupo","prep"] 
+   tim["hashupokeep","use"] <- timefun({
+    p2 <- hashupo(h, keep.order=TRUE)
+   })[3]
+   if (uniorder=="original")
+    stopifnot(identical(p2, p))
+
+   
+   tim["sortorderupo","prep"] <- timefun({
+    s <- clone(x)
+    o <- seq_along(x)
+    ramsortorder(s, o, na.last=FALSE)
+    nunique <- sortnut(s)[1]
+   })[3]
+   tim["sortorderupo","use"] <- timefun({
+    p2 <- sortorderupo(s, o, nunique)
+   })[3]
+   if (uniorder=="values")
+    stopifnot(identical(p2, p))
+    
+   tim["sortorderupokeep","prep"] <- timefun({
+    s <- clone(x)
+    o <- seq_along(x)
+    ramsortorder(s, o, na.last=FALSE)
+    nunique <- sortnut(s)[1]
+   })[3]
+   tim["sortorderupokeep","use"] <- timefun({
+    p2 <- sortorderupo(s, o, nunique, keep.order=TRUE)
+   })[3]
+   if (uniorder=="original")
+    stopifnot(identical(p2, p))
+    
+   tim["orderupo","prep"] <- timefun({
+    o <- seq_along(x)
+    ramorder(x, o, na.last=FALSE)
+    nunique <- ordernut(x,o)[1]
+   })[3]
+   tim["orderupo","use"] <- timefun({
+    p2 <- orderupo(x, o, nunique)
+   })[3]
+   if (uniorder=="values")
+    stopifnot(identical(p2, p))
+   
+   tim["orderupokeep","prep"] <- tim["orderupo","prep"]
+   tim["orderupokeep","use"] <- timefun({
+    p2 <- orderupo(x, o, nunique, keep.order=TRUE)
+    nunique <- ordernut(x,o)[1]
+   })[3]
+   if (uniorder=="original")
+    stopifnot(identical(p2, p))
+
+   tim["hashdup","prep"] <- tim["hashupo","prep"]
+   tim["hashdup","use"] <- timefun({
+    p2 <- (1:n)[!hashdup(h)]
+   })[3]
+   if (uniorder=="original")
+    stopifnot(identical(p2, p))
+
+   tim["sortorderdup","prep"] <- tim["sortorderupokeep","prep"]
+   tim["sortorderdup","use"] <- timefun({
+    p2 <- (1:n)[!sortorderdup(s, o)]
+   })[3]
+   if (uniorder=="original")
+    stopifnot(identical(p2, p))
+   
+   hashcache(x)
+   tim["hash.cache","use"] <- timefun({
+    p2 <- unipos(x, order=uniorder)
+   })[3]
+   if (uniorder!="any")
+    stopifnot(identical(p2, p))
+   remcache(x)
+
+   sortordercache(x)
+   tim["sortorder.cache","use"] <- timefun({
+    p2 <- unipos(x, order=uniorder)
+   })[3]
+   if (uniorder!="any")
+    stopifnot(identical(p2, p))
+   remcache(x)
+
+   ordercache(x)
+   tim["order.cache","use"] <- timefun({
+    p2 <- unipos(x, order=uniorder)
+   })[3]
+   if (uniorder!="any")
+    stopifnot(identical(p2, p))
+   remcache(x)
+   
+   if (plot){
+    barplot(t(tim), cex.names=0.7)
+    title(paste("unipos(",n,", order=",uniorder,")", sep=""))
+   }
+   
+   ret[["unipos",as.character(n)]] <- tim
+  }
+ }
+ if ("table" %in% what){
+  message("table: timings of different methods")
+  N <- c(nsmall, nbig)
+  for (i in seq_along(N)){
+   n <- N[i]
+   x <- c(sample(1024, n-1, TRUE), NA)
+   tim <- matrix(0, 13, 3)
+   dimnames(tim) <- list(c("tabulate","table","table.64","hashmaptab","hashtab","hashtab2","sorttab","sortordertab","ordertab","ordertabkeep"
+    ,"hash.cache","sort.cache","order.cache")
+   , c("prep","both","use"))
+
+   tim["tabulate","both"] <- timefun({
+    tabulate(x)
+   })[3]
+   
+   tim["table","both"] <- timefun({
+    p <- table(x, exclude=NULL)
+   })[3]
+   p <- p[-length(p)]
+   
+   x <- as.integer64(x)
+
+   tim["table.64","both"] <- timefun({
+    p2 <- table.integer64(x, order=taborder)
+   })[3]
+   p2 <- p2[-1]
+   stopifnot(identical(p2, p))
+
+   tim["hashmaptab","both"] <- timefun({
+    p <- hashmaptab(x)
+   })[3]
+   
+   tim["hashtab","prep"] <- timefun({
+    h <- hashmap(x)
+   })[3]
+   tim["hashtab","use"] <- timefun({
+    p2 <- hashtab(h)
+   })[3]
+   stopifnot(identical(p2, p))
+   
+   tim["hashtab2","prep"] <- tim["hashtab","prep"] + timefun({
+    h <- hashmap(x, nunique=h$nunique)
+   })[3]
+   tim["hashtab2","use"] <- timefun({
+    p2 <- hashtab(h)
+   })[3]
+   
+   sortp <- function(p){
+    s <- p$values
+    o <- seq_along(s)
+    ramsortorder(s,o, na.last=FALSE)
+    list(values=s, counts=p$counts[o])
+   }
+   p <- sortp(p)
+   p2 <- sortp(p2)
+   stopifnot(identical(p2, p))
+   
+   tim["sorttab","prep"] <- timefun({
+    s <- clone(x)
+    ramsort(s, na.last=FALSE)
+    nunique <- sortnut(s)[1]
+   })[3]
+   tim["sorttab","use"] <- timefun({
+    p2 <- list(values=sortuni(s, nunique), counts=sorttab(s, nunique))
+   })[3]
+   stopifnot(identical(p2, p))
+    
+   tim["sortordertab","prep"] <- timefun({
+    s <- clone(x)
+    o <- seq_along(x)
+    ramsortorder(s, o, na.last=FALSE)
+    nunique <- sortnut(s)[1]
+  	})[3]
+			tim["sortordertab","use"] <- timefun({
+				p2 <- list(values=sortorderuni(x, s, o, nunique), counts=sortordertab(s, o))
+			})[3]
+			p2 <- sortp(p2)
+			stopifnot(identical(p2, p))
+				
+			tim["ordertab","prep"] <- timefun({
+				o <- seq_along(x)
+				ramorder(x, o, na.last=FALSE)
+				nunique <- ordernut(x, o)[1]
+			})[3]
+			tim["ordertab","use"] <- timefun({
+				p2 <- list(values=orderuni(x, o, nunique), counts=ordertab(x, o, nunique))
+			})[3]
+			stopifnot(identical(p2, p))
+				
+			tim["ordertabkeep","prep"] <- tim["ordertab","prep"] 
+			tim["ordertabkeep","use"] <- timefun({
+				p2 <- list(values=orderuni(x, o, nunique, keep.order=TRUE), counts=ordertab(x, o, nunique, keep.order=TRUE))
+			})[3]
+			p2 <- sortp(p2)
+			stopifnot(identical(p2, p))
+			
+			hashcache(x)
+			tim["hash.cache","use"] <- timefun({
+				p <- table.integer64(x, order=taborder)
+			})[3]
+			remcache(x)
+
+			sortordercache(x)
+			tim["sort.cache","use"] <- timefun({
+				p2 <- table.integer64(x, order=taborder)
+			})[3]
+			stopifnot(identical(p2, p))
+			remcache(x)
+
+			ordercache(x)
+			tim["order.cache","use"] <- timefun({
+				p2 <- table.integer64(x, order=taborder)
+			})[3]
+			stopifnot(identical(p2, p))
+			remcache(x)
+			
+			if (plot){
+				barplot(t(tim), cex.names=0.7)
+				title(paste("table.integer64(",n,", order=",taborder,")", sep=""))
+			}
+			
+			ret[["table",as.character(n)]] <- tim
+		}
+	}
+	if ("rank" %in% what){
+		message("rank: timings of different methods")
+		N <- c(nsmall, nbig)
+		for (i in seq_along(N)){
+			n <- N[i]
+			x <- c(sample(n, n-1, TRUE), NA)
+			tim <- matrix(0, 7, 3)
+			dimnames(tim) <- list(c("rank","rank.keep","rank.64","sortorderrnk","orderrnk"
+				,"sort.cache","order.cache")
+			, c("prep","both","use"))
+
+			tim["rank","both"] <- timefun({
+				rank(x)
+			})[3]
+			
+			tim["rank.keep","both"] <- timefun({
+				p <- rank(x, na.last="keep")
+			})[3]
+			
+			x <- as.integer64(x)
+
+			tim["rank.64","both"] <- timefun({
+				p2 <- rank.integer64(x)
+			})[3]
+			stopifnot(identical(p2, p))
+				
+			tim["sortorderrnk","prep"] <- timefun({
+				s <- clone(x)
+				o <- seq_along(x)
+				na.count <- ramsortorder(s, o, na.last=FALSE)
+			})[3]
+			tim["sortorderrnk","use"] <- timefun({
+				p2 <- sortorderrnk(s, o, na.count)
+			})[3]
+			stopifnot(identical(p2, p))
+				
+			tim["orderrnk","prep"] <- timefun({
+				o <- seq_along(x)
+				na.count <- ramorder(x, o, na.last=FALSE)
+			})[3]
+			tim["orderrnk","use"] <- timefun({
+				p2 <- orderrnk(x, o, na.count)
+			})[3]
+			stopifnot(identical(p2, p))
+				
+			sortordercache(x)
+			tim["sort.cache","use"] <- timefun({
+				p2 <- rank.integer64(x)
+			})[3]
+			stopifnot(identical(p2, p))
+			remcache(x)
+
+			ordercache(x)
+			tim["order.cache","use"] <- timefun({
+				p2 <- rank.integer64(x)
+			})[3]
+			stopifnot(identical(p2, p))
+			remcache(x)
+			
+			if (plot){
+				barplot(t(tim), cex.names=0.7)
+				title(paste("rank.integer64(",n,")", sep=""))
+			}
+			
+			ret[["rank",as.character(n)]] <- tim
+		}
+	}
+	if ("quantile" %in% what){
+		message("quantile: timings of different methods")
+		N <- c(nsmall, nbig)
+		for (i in seq_along(N)){
+			n <- N[i]
+			x <- c(sample(n, n-1, TRUE), NA)
+			tim <- matrix(0, 6, 3)
+			dimnames(tim) <- list(c("quantile","quantile.64","sortqtl","orderqtl"
+				,"sort.cache","order.cache")
+			, c("prep","both","use"))
+
+			tim["quantile","both"] <- timefun({
+				p <- quantile(x, type=1, na.rm=TRUE)
+			})[3]
+			p2 <- p
+			p <- as.integer64(p2)
+			names(p) <- names(p2)
+			
+			x <- as.integer64(x)
+
+			tim["quantile.64","both"] <- timefun({
+				p2 <- quantile(x, na.rm=TRUE)
+			})[3]
+			stopifnot(identical(p2, p))
+				
+			tim["sortqtl","prep"] <- timefun({
+				s <- clone(x)
+				na.count <- ramsort(s, na.last=FALSE)
+			})[3]
+			tim["sortqtl","use"] <- timefun({
+				p2 <- sortqtl(s, na.count, seq(0, 1, 0.25))
+			})[3]
+			stopifnot(identical(unname(p2), unname(p)))
+				
+			tim["orderqtl","prep"] <- timefun({
+				o <- seq_along(x)
+				na.count <- ramorder(x, o, na.last=FALSE)
+			})[3]
+			tim["orderqtl","use"] <- timefun({
+				p2 <- orderqtl(x, o, na.count, seq(0, 1, 0.25))
+			})[3]
+			stopifnot(identical(unname(p2), unname(p)))
+				
+			sortordercache(x)
+			tim["sort.cache","use"] <- timefun({
+				p2 <- quantile(x, na.rm=TRUE)
+			})[3]
+			stopifnot(identical(p2, p))
+			remcache(x)
+
+			ordercache(x)
+			tim["order.cache","use"] <- timefun({
+				p2 <- quantile(x, na.rm=TRUE)
+			})[3]
+			stopifnot(identical(p2, p))
+			remcache(x)
+			
+			if (plot){
+				barplot(t(tim), cex.names=0.7)
+				title(paste("quantile(",n,")", sep=""))
+			}
+			
+			ret[["quantile",as.character(n)]] <- tim
+		}
+	}
+
+	ret
+	
+}
+
+
+#! \name{match.integer64}
+#! \alias{match.integer64}
+#! \alias{\%in\%.integer64}
+#! \title{
+#! 64-bit integer matching
+#! }
+#! \description{
+#! \code{match} returns a vector of the positions of (first) matches of its first argument in its second. 
+#! 
+#! \code{\%in\%} is a more intuitive interface as a binary operator, which returns a logical vector indicating if there is a match or not for its left operand. 
+#! 
+#! }
+#! \usage{
+#! \method{match}{integer64}(x, table, nomatch = NA_integer_, nunique = NULL, method = NULL, ...)
+#! \method{\%in\%}{integer64}(x, table, ...)
+#! }
+#! \arguments{
+#!   \item{x}{
+#! 	integer64 vector: the values to be matched, optionally carrying a cache created with \code{\link{hashcache}}
+#! }
+#!   \item{table}{
+#! 	integer64 vector: the values to be matched against, optionally carrying a cache created with \code{\link{hashcache}} or \code{\link{sortordercache}}
+#! }
+#!   \item{nomatch}{
+#!   the value to be returned in the case when no match is found. Note that it is coerced to integer.
+#! }
+#!   \item{nunique}{
+#! 	NULL or the number of unique values of table (including NA). Providing \code{nunique} can speed-up matching when \code{table} has no cache. Note that a wrong nunique can cause undefined behaviour up to a crash.
+#! }
+#!   \item{method}{
+#! 	NULL for automatic method selection or a suitable low-level method, see details
+#! }
+#!   \item{\dots}{
+#! ignored
+#! }
+#! }
+#! \details{
+#!   These functions automatically choose from several low-level functions considering the size of \code{x} and \code{table} and the availability of caches. 
+#! 
+#! 
+#!   Suitable methods for \code{match.integer64} are \code{\link{hashpos}} (hash table lookup), \code{\link{hashrev}} (reverse lookup), \code{\link{sortorderpos}} (fast ordering) and \code{\link{orderpos}} (memory saving ordering).
+#!   Suitable methods for \code{\%in\%.integer64} are \code{\link{hashfin}} (hash table lookup), \code{\link{hashrin}} (reverse lookup), \code{\link{sortfin}} (fast sorting) and \code{\link{orderfin}} (memory saving ordering).
+#! }
+#! \value{
+#!   A vector of the same length as \code{x}.
+#! 
+#!   \code{match}: An integer vector giving the position in \code{table} of
+#!   the first match if there is a match, otherwise \code{nomatch}.
+#! 
+#!   If \code{x[i]} is found to equal \code{table[j]} then the value
+#!   returned in the \code{i}-th position of the return value is \code{j},
+#!   for the smallest possible \code{j}.  If no match is found, the value
+#!   is \code{nomatch}.
+#! 
+#!   \code{\%in\%}: A logical vector, indicating if a match was located for
+#!   each element of \code{x}: thus the values are \code{TRUE} or
+#!   \code{FALSE} and never \code{NA}.
+#! }
+#! \author{
+#! 	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \seealso{
+#! 	\code{\link{match}}
+#! }
+#! \examples{
+#! x <- as.integer64(c(NA, 0:9), 32)
+#! table <- as.integer64(c(1:9, NA))
+#! match.integer64(x, table)
+#! "\%in\%.integer64"(x, table)
+#!
+#! x <- as.integer64(sample(c(rep(NA, 9), 0:9), 32, TRUE))
+#! table <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#! stopifnot(identical(match.integer64(x, table), match(as.integer(x), as.integer(table))))
+#! stopifnot(identical("\%in\%.integer64"(x, table), as.integer(x) \%in\% as.integer(table)))
+#! 
+#! \dontrun{
+#! 	message("check when reverse hash-lookup beats standard hash-lookup")
+#! 	e <- 4:24
+#! 	timx <- timy <- matrix(NA, length(e), length(e), dimnames=list(e,e))
+#! 	for (iy in seq_along(e))
+#! 	for (ix in 1:iy){
+#! 		nx <- 2^e[ix]
+#! 		ny <- 2^e[iy]
+#! 		x <- as.integer64(sample(ny, nx, FALSE))
+#! 		y <- as.integer64(sample(ny, ny, FALSE))
+#! 		#hashfun(x, bits=as.integer(5))
+#! 		timx[ix,iy] <- repeat.time({
+#! 		hx <- hashmap(x)
+#! 		py <- hashrev(hx, y)
+#! 		})[3]
+#! 		timy[ix,iy] <- repeat.time({
+#! 		hy <- hashmap(y)
+#! 		px <- hashpos(hy, x)
+#! 		})[3]
+#! 		#identical(px, py)
+#! 		print(round(timx[1:iy,1:iy]/timy[1:iy,1:iy], 2), na.print="")
+#! 	}
+#!
+#! 	message("explore best low-level method given size of x and table")
+#! 	B1 <- 1:27
+#! 	B2 <- 1:27
+#! 	tim <- array(NA, dim=c(length(B1), length(B2), 5)
+#!  , dimnames=list(B1, B2, c("hashpos","hashrev","sortpos1","sortpos2","sortpos3")))
+#! 	for (i1 in B1)
+#! 	for (i2 in B2)
+#! 	{
+#! 	  b1 <- B1[i1]
+#! 	  b2 <- B1[i2]
+#! 	  n1 <- 2^b1
+#! 	  n2 <- 2^b2
+#! 	  x1 <- as.integer64(c(sample(n2, n1-1, TRUE), NA))
+#! 	  x2 <- as.integer64(c(sample(n2, n2-1, TRUE), NA))
+#! 	  tim[i1,i2,1] <- repeat.time({h <- hashmap(x2);hashpos(h, x1);rm(h)})[3]
+#! 	  tim[i1,i2,2] <- repeat.time({h <- hashmap(x1);hashrev(h, x2);rm(h)})[3]
+#! 	  s <- clone(x2); o <- seq_along(s); ramsortorder(s, o)
+#! 	  tim[i1,i2,3] <- repeat.time(sortorderpos(s, o, x1, method=1))[3]
+#! 	  tim[i1,i2,4] <- repeat.time(sortorderpos(s, o, x1, method=2))[3]
+#! 	  tim[i1,i2,5] <- repeat.time(sortorderpos(s, o, x1, method=3))[3]
+#! 	  rm(s,o)
+#! 	  print(apply(tim, 1:2, function(ti)if(any(is.na(ti)))NA else which.min(ti)))
+#! 	}
+#! }
+#! }
+#! \keyword{manip}
+#! \keyword{logic}
+
+
+# Positions of (first) matches of x in table (both integer64), like base match().
+# Chooses a low-level method from the sizes of x and table and from any
+# hashmap/sort/order cache attached to table (or, for "hashrev", to x).
+match.integer64 <- function(x, table, nomatch = NA_integer_, nunique=NULL, method=NULL, ...){
+  stopifnot(is.integer64(x) &&  is.integer64(table))  # xx TODO
+  c <- cache(table)
+  if (is.null(method)){
+    if (is.null(c)){
+			nx <- length(x)
+			if (is.null(nunique))
+				nunique <- length(table)
+			# hash table size in bits implied by 1.5 times the number of (unique) values
+			btable <- as.integer(ceiling(log2(nunique*1.5)))
+			bx <- as.integer(ceiling(log2(nx*1.5)))
+			# small x against large table: hashing x and reverse-looking-up table wins
+			if (bx<=17 && btable>=16){
+				method <- "hashrev"
+			}else{
+				method <- "hashpos"
+			}
+	}else{
+		if (exists("hashmap", envir=c, inherits=FALSE)){
+			method <- "hashpos"
+		}else if (exists("sort", envir=c, inherits=FALSE) && exists("order", envir=c, inherits=FALSE) && (length(table)>length(x) || length(x)<4096)){
+			method <- "sortorderpos"
+		}else if (exists("order", envir=c, inherits=FALSE) && (length(table)>length(x) || length(x)<4096)){
+			method <- "orderpos"
+		}else{
+			nx <- length(x)
+			if (is.null(nunique)){
+			  if (exists("nunique", envir=c, inherits=FALSE))
+				nunique <- c$nunique
+			  else
+				nunique <- length(table)
+			}
+			btable <- as.integer(ceiling(log2(nunique*1.5)))
+			bx <- as.integer(ceiling(log2(nx*1.5)))
+			if (bx<=17 && btable>=16){
+				method <- "hashrev"
+			}else{
+				method <- "hashpos"
+			}
+		}
+	}
+  }
+  switch(method
+  , hashpos={
+			if (is.null(c) || !exists("hashmap", envir=c, inherits=FALSE)){
+				if (exists("btable", inherits=FALSE))
+					h <- hashmap(table, hashbits=btable)
+				else{
+					if (is.null(nunique))
+						nunique <- c$nunique
+					h <- hashmap(table, nunique=nunique)
+				}
+			}else
+				h <- c
+			p <- hashpos(h, x, nomatch=nomatch)
+    }
+  , hashrev={
+		c <- cache(x)
+		if (is.null(c) || !exists("hashmap", envir=c, inherits=FALSE)){
+				# NOTE(review): the hashpos branch passes hashbits=, this one passes
+				# bits= -- looks inconsistent, confirm against the hashmap() signature
+				if (exists("bx", inherits=FALSE))
+					h <- hashmap(x, bits=bx)
+				else{
+					if (is.null(nunique))
+						nunique <- c$nunique
+					h <- hashmap(x, nunique=nunique)
+				}
+			}else
+				h <- c
+		p <- hashrev(h, table, nomatch=nomatch)
+    }
+  , sortorderpos={
+		if (is.null(c) || !exists("sort", c) || !exists("order", c)){
+			s <- clone(table)
+			o <- seq_along(s)
+			ramsortorder(s, o, na.last=FALSE)
+		}else{
+			s <- get("sort", c)
+			o <- get("order", c)
+		}
+		p <- sortorderpos(s, o, x, nomatch=nomatch)
+    }
+  , orderpos={
+		if (is.null(c) || !exists("order", c)){
+			# BUG FIX: was seq_along(s), but 's' is never defined on this code path
+			o <- seq_along(table)
+			ramorder(table, o, na.last=FALSE)
+		}else{
+			o <- get("order", c)
+		}
+		p <- orderpos(table, o, x, nomatch=nomatch)
+    }
+  , stop("unknown method")
+  )
+  p
+}
+
+
+# %in% operator for integer64: logical vector, TRUE where x[i] occurs in table.
+# Method selection mirrors match.integer64 but uses the *fin/*rin lookups
+# which return logicals instead of positions.
+"%in%.integer64" <- function(x, table, ...){
+  stopifnot(is.integer64(x) &&  is.integer64(table))  # xx TODO
+	nunique <- NULL
+	method <- NULL
+  c <- cache(table)
+  if (is.null(method)){
+    if (is.null(c)){
+			nx <- length(x)
+			if (is.null(nunique))
+				nunique <- length(table)
+			# hash table size in bits implied by 1.5 times the number of (unique) values
+			btable <- as.integer(ceiling(log2(nunique*1.5)))
+			bx <- as.integer(ceiling(log2(nx*1.5)))
+			# small x against large table: reverse lookup via a hashmap of x wins
+			if (bx<=17 && btable>=16){
+				method <- "hashrin"
+			}else{
+				method <- "hashfin"
+			}
+	}else{
+		if (exists("hashmap", envir=c, inherits=FALSE)){
+			method <- "hashfin"
+		}else if (exists("sort", envir=c, inherits=FALSE) && (length(table)>length(x) || length(x)<4096)){
+			method <- "sortfin"
+		}else if (exists("order", envir=c, inherits=FALSE) && (length(table)>length(x) || length(x)<4096)){
+			method <- "orderfin"
+		}else{
+			nx <- length(x)
+			if (is.null(nunique)){
+			  if (exists("nunique", envir=c, inherits=FALSE))
+				nunique <- c$nunique
+			  else
+				nunique <- length(table)
+			}
+			btable <- as.integer(ceiling(log2(nunique*1.5)))
+			bx <- as.integer(ceiling(log2(nx*1.5)))
+			if (bx<=17 && btable>=16){
+				method <- "hashrin"
+			}else{
+				method <- "hashfin"
+			}
+		}
+	}
+  }
+  switch(method
+  , hashfin={
+		if (is.null(c) || !exists("hashmap", envir=c, inherits=FALSE)){
+			if (exists("btable", inherits=FALSE))
+				h <- hashmap(table, hashbits=btable)
+			else{
+				if (is.null(nunique))
+					nunique <- c$nunique
+				h <- hashmap(table, nunique=nunique)
+			}
+		}else
+			h <- c
+		p <- hashfin(h, x)
+    }
+  , hashrin={
+		c <- cache(x)
+		if (is.null(c) || !exists("hashmap", envir=c, inherits=FALSE)){
+				# NOTE(review): hashfin branch passes hashbits=, this one passes
+				# bits= -- looks inconsistent, confirm against the hashmap() signature
+				if (exists("bx", inherits=FALSE))
+					h <- hashmap(x, bits=bx)
+				else{
+					if (is.null(nunique))
+						nunique <- c$nunique
+					h <- hashmap(x, nunique=nunique)
+				}
+		}else
+			h <- c
+		p <- hashrin(h, table)
+    }
+  , sortfin={
+		if (is.null(c) || !exists("sort", c)){
+			s <- clone(table)
+			ramsort(s, na.last=FALSE)
+		}else{
+			s <- get("sort", c)
+		}
+		p <- sortfin(s, x)
+    }
+  , orderfin={
+		if (is.null(c) || !exists("order", c)){
+			# BUG FIX: was seq_along(s), but 's' is never defined on this code path
+			o <- seq_along(table)
+			ramorder(table, o, na.last=FALSE)
+		}else{
+			o <- get("order", c)
+		}
+		p <- orderfin(table, o, x)
+    }
+  , stop("unknown method")
+  )
+  p
+}
+
+#! \name{duplicated.integer64}
+#! \alias{duplicated.integer64}
+#! \title{Determine Duplicate Elements of integer64}
+#! \description{
+#!   \code{duplicated()} determines which elements of a vector or data frame are duplicates
+#!   of elements with smaller subscripts, and returns a logical vector
+#!   indicating which elements (rows) are duplicates.
+#! }
+#! \usage{
+#! \method{duplicated}{integer64}(x, incomparables = FALSE, nunique = NULL, method = NULL, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{a vector or a data frame or an array or \code{NULL}.}
+#!   \item{incomparables}{ignored}
+#!   \item{nunique}{
+#! 	NULL or the number of unique values (including NA). Providing \code{nunique} can speed-up matching when \code{x} has no cache. Note that a wrong nunique can cause undefined behaviour up to a crash.
+#! }
+#!   \item{method}{
+#! 	NULL for automatic method selection or a suitable low-level method, see details
+#! }
+#!   \item{\dots}{ignored}
+#! }
+#! \details{
+#!   This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+#! 
+#!   Suitable methods are \code{\link{hashdup}} (hashing), \code{\link{sortorderdup}} (fast ordering) and \code{\link{orderdup}} (memory saving ordering).
+#! }
+#! \value{
+#!     \code{duplicated()}: a logical vector of the same length as \code{x}.  
+#! }
+#! \author{
+#! 	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \seealso{ \code{\link{duplicated}}, \code{\link{unique.integer64}}  }
+#! \examples{
+#! x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#! duplicated(x)
+#! 
+#! stopifnot(identical(duplicated(x),  duplicated(as.integer(x))))
+#! }
+#! \keyword{logic}
+#! \keyword{manip}
+#! 
+
+# duplicated() method for integer64: TRUE for elements that already occurred
+# at a smaller subscript. Picks hashing vs. (sort-)ordering from the cache
+# attached to x and from length(x).
+duplicated.integer64 <- function(x
+, incomparables = FALSE  # dummy parameter
+, nunique = NULL
+, method = NULL
+, ...
+){
+  stopifnot(identical(incomparables, FALSE))
+  c <- cache(x)
+  if (is.null(nunique) && !is.null(c))
+	nunique <- c$nunique
+  if (is.null(method)){
+    if (is.null(c)){
+		# no cache: hashing is preferable up to ~5e7 elements, sorting beyond
+		if (length(x)>5e7)
+			method <- "sortorderdup"
+		else
+			method <- "hashdup"
+	}else{
+		if (exists("sort", envir=c, inherits=FALSE) && exists("order", envir=c, inherits=FALSE))
+			method <- "sortorderdup"
+		else if (exists("hashmap", envir=c, inherits=FALSE))
+			method <- "hashdup"
+		else if (exists("order", envir=c, inherits=FALSE))
+			method <- "orderdup"
+		else if (length(x)>5e7)
+			method <- "sortorderdup"
+		else
+			method <- "hashdup"
+	}
+  }
+  switch(method
+  , hashdup={
+		if (is.null(c) || !exists("hashmap", envir=c, inherits=FALSE))
+			h <- hashmap(x, nunique=nunique)
+		else
+			h <- c
+		p <- hashdup(h)
+    }
+  , sortorderdup={
+		if (is.null(c) || !exists("sort", c, inherits=FALSE) || !exists("order", c, inherits=FALSE)){
+			s <- clone(x)
+			o <- seq_along(s)
+			ramsortorder(s, o, na.last=FALSE)
+		}else{
+			s <- get("sort", c, inherits=FALSE)
+			o <- get("order", c, inherits=FALSE)
+		}
+		p <- sortorderdup(s, o)
+    }
+  , orderdup={
+		if (is.null(c) || !exists("order", c, inherits=FALSE)){
+			# BUG FIX: was seq_along(s), but 's' is never defined on this code path
+			o <- seq_along(x)
+			ramorder(x, o, na.last=FALSE)
+		}else{
+			o <- get("order", c, inherits=FALSE)
+		}
+		p <- orderdup(x, o)
+    }
+  , stop("unknown method ", method)  # fixed: message was missing a space before the method name
+  )
+  p
+}
+
+
+#! \name{unique.integer64}
+#! \alias{unique.integer64}
+#! \title{Extract Unique Elements from integer64}
+#! \description{
+#!   \code{unique} returns a vector like \code{x} but with duplicate elements/rows removed.
+#! }
+#! \usage{
+#! \method{unique}{integer64}(x, incomparables = FALSE, order = c("original","values","any")
+#! , nunique = NULL, method = NULL, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{a vector or a data frame or an array or \code{NULL}.}
+#!   \item{incomparables}{ignored}
+#!   \item{order}{The order in which unique values will be returned, see details}
+#!   \item{nunique}{
+#! 	NULL or the number of unique values (including NA). Providing \code{nunique} can speed-up matching when \code{x} has no cache. Note that a wrong nunique can cause undefined behaviour up to a crash.
+#! }
+#!   \item{method}{
+#! 	NULL for automatic method selection or a suitable low-level method, see details
+#! }
+#!   \item{\dots}{ignored}
+#! }
+#! \details{
+#!   This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+#!   Suitable methods are \code{\link{hashmapuni}} (simultaneously creating and using a hashmap)
+#! , \code{\link{hashuni}} (first creating a hashmap then using it)
+#! , \code{\link{sortuni}} (fast sorting for sorted order only)
+#! , \code{\link{sortorderuni}} (fast ordering for original order only) 
+#! and \code{\link{orderuni}} (memory saving ordering).
+#! \cr
+#! The default \code{order="original"} returns unique values in the order of the first appearance in \code{x} like in \code{\link{unique}}, this costs extra processing. 
+#! \code{order="values"} returns unique values in sorted order like in \code{\link{table}}, this costs extra processing with the hash methods but comes for free. 
+#! \code{order="any"} returns unique values in undefined order, possibly faster. For hash methods this will be a quasi random order, for sort methods this will be sorted order.
+#! }
+#! \value{
+#!   For a vector, an object of the same type of \code{x}, but with only
+#!   one copy of each duplicated element.  No attributes are copied (so
+#!   the result has no names).
+#! }
+#! \author{
+#! 	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \seealso{
+#!   \code{\link{unique}} for the generic, \code{\link{unipos}} which gives the indices of the unique
+#!   elements and \code{\link{table.integer64}} which gives frequencies of the unique elements.
+#! }
+#! \examples{
+#! x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#! unique(x)
+#! unique(x, order="values")
+#! 
+#! stopifnot(identical(unique(x),  x[!duplicated(x)]))
+#! stopifnot(identical(unique(x),  as.integer64(unique(as.integer(x)))))
+#! stopifnot(identical(unique(x, order="values")
+#! ,  as.integer64(sort(unique(as.integer(x)), na.last=FALSE))))
+#! }
+#! \keyword{manip}
+#! \keyword{logic}
+
+
+# unique() method for integer64: returns x with duplicate elements removed.
+# order="original" keeps first-appearance order (like base unique), "values"
+# returns sorted values (like table), "any" lets the fastest method decide.
+# A cache created with hashcache/sortordercache/ordercache is exploited if present.
+unique.integer64 <- function(x
+, incomparables = FALSE  # dummy parameter
+, order = c("original","values","any")
+, nunique = NULL
+, method = NULL
+, ...
+){
+  stopifnot(identical(incomparables, FALSE))
+  order <- match.arg(order)
+  xcache <- cache(x)
+  keep.order <- order == "original"
+  if (is.null(nunique) && !is.null(xcache))
+    nunique <- xcache$nunique
+  # --- method selection (only if the caller did not fix one) ---
+  if (is.null(method)){
+    if (is.null(xcache)){
+      method <- if (order == "values") "sortuni" else "hashmapuni"
+    }else{
+      cached <- function(what) exists(what, envir=xcache, inherits=FALSE)
+      if (order == "original"){
+        if (cached("hashmap"))
+          method <- "hashuni"
+        else if (cached("order"))
+          method <- if (cached("sort")) "sortorderuni" else "orderuni"
+        else
+          method <- "hashmapuni"
+      }else if (order == "values"){
+        if (cached("sort"))
+          method <- "sortuni"
+        else if (cached("order"))
+          method <- "orderuni"
+        else if (cached("hashmap") && xcache$nunique < length(x)/2)
+          method <- "hashuni"
+        else
+          method <- "sortuni"
+      }else{  # order == "any"
+        if (cached("sort"))
+          method <- "sortuni"
+        else if (cached("hashmap"))
+          method <- "hashuni"
+        else if (cached("order"))
+          method <- "orderuni"
+        else
+          method <- "sortuni"
+      }
+    }
+  }
+  # --- method execution ---
+  if (method == "hashmapuni"){
+    res <- hashmapuni(x, nunique=nunique)
+  }else if (method == "hashuni"){
+    if (is.null(xcache) || !exists("hashmap", envir=xcache, inherits=FALSE))
+      hm <- hashmap(x, nunique=nunique)
+    else
+      hm <- xcache
+    res <- hashuni(hm, keep.order=keep.order)
+    if (order == "values")
+      ramsort(res, na.last=FALSE)
+  }else if (method == "sortuni"){
+    if (is.null(xcache) || !exists("sort", xcache, inherits=FALSE)){
+      s <- clone(x)
+      ramsort(s, na.last=FALSE)
+    }else{
+      s <- get("sort", xcache, inherits=FALSE)
+    }
+    if (is.null(nunique))
+      nunique <- sortnut(s)[1]
+    res <- sortuni(s, nunique)
+  }else if (method == "sortorderuni"){
+    if (is.null(xcache) || !exists("sort", xcache, inherits=FALSE) || !exists("order", xcache, inherits=FALSE)){
+      s <- clone(x)
+      o <- seq_along(x)
+      ramsortorder(s, o, na.last=FALSE)
+    }else{
+      s <- get("sort", xcache, inherits=FALSE)
+      o <- get("order", xcache, inherits=FALSE)
+    }
+    if (is.null(nunique))
+      nunique <- sortnut(s)[1]
+    res <- sortorderuni(x, s, o, nunique)
+  }else if (method == "orderuni"){
+    if (is.null(xcache) || !exists("order", xcache, inherits=FALSE)){
+      o <- seq_along(x)
+      ramorder(x, o, na.last=FALSE)
+    }else{
+      o <- get("order", xcache, inherits=FALSE)
+    }
+    if (is.null(nunique))
+      nunique <- ordernut(x, o)[1]
+    res <- orderuni(x, o, nunique, keep.order=keep.order)
+  }else{
+    stop("unknown method", method)
+  }
+  res
+}
+
+
+#! \name{unipos}
+#! \alias{unipos}
+#! \alias{unipos.integer64}
+#! \title{Extract Positions of Unique Elements}
+#! \description{
+#!   \code{unipos} returns the positions of those elements returned by \code{\link{unique}}.
+#! }
+#! \usage{
+#! unipos(x, incomparables = FALSE, order = c("original","values","any"), \dots)
+#! \method{unipos}{integer64}(x, incomparables = FALSE, order = c("original","values","any")
+#! , nunique = NULL, method = NULL, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{a vector or a data frame or an array or \code{NULL}.}
+#!   \item{incomparables}{ignored}
+#!   \item{order}{The order in which positions of unique values will be returned, see details}
+#!   \item{nunique}{
+#! 	NULL or the number of unique values (including NA). Providing \code{nunique} can speed-up when \code{x} has no cache. Note that a wrong nunique can cause undefined behaviour up to a crash.
+#! }
+#!   \item{method}{
+#! 	NULL for automatic method selection or a suitable low-level method, see details
+#! }
+#!   \item{\dots}{ignored}
+#! }
+#! \details{
+#!   This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+#!   Suitable methods are \code{\link{hashmapupo}} (simultaneously creating and using a hashmap)
+#! , \code{\link{hashupo}} (first creating a hashmap then using it)
+#! , \code{\link{sortorderupo}} (fast ordering) 
+#! and \code{\link{orderupo}} (memory saving ordering).
+#! \cr
+#! The default \code{order="original"} collects unique values in the order of the first appearance in \code{x} like in \code{\link{unique}}, this costs extra processing. 
+#! \code{order="values"} collects unique values in sorted order like in \code{\link{table}}, this costs extra processing with the hash methods but comes for free. 
+#! \code{order="any"} collects unique values in undefined order, possibly faster. For hash methods this will be a quasi random order, for sort methods this will be sorted order.
+#! }
+#! \value{
+#!   an integer vector of positions
+#! }
+#! \author{
+#! 	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \seealso{
+#!   \code{\link{unique.integer64}} for unique values and \code{\link{match.integer64}} for general matching.
+#! }
+#! \examples{
+#! x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#! unipos(x)
+#! unipos(x, order="values")
+#! 
+#! stopifnot(identical(unipos(x),  (1:length(x))[!duplicated(x)]))
+#! stopifnot(identical(unipos(x),  match.integer64(unique(x), x)))
+#! stopifnot(identical(unipos(x, order="values"),  match.integer64(unique(x, order="values"), x)))
+#! stopifnot(identical(unique(x),  x[unipos(x)]))
+#! stopifnot(identical(unique(x, order="values"),  x[unipos(x, order="values")]))
+#! }
+#! \keyword{manip}
+#! \keyword{logic}
+
+
+# Generic for unipos(); dispatches on class(x), see unipos.integer64 below
+unipos <- function(x, incomparables = FALSE, order = c("original","values","any"), ...) UseMethod("unipos")
+# Return the positions of the unique elements of x (integer64 method).
+# Arguments:
+#   x             integer64 vector
+#   incomparables must be FALSE (dummy, kept for generic compatibility)
+#   order         "original" (order of first appearance), "values" (sorted)
+#                 or "any" (whatever is cheapest for the chosen method)
+#   nunique       known number of unique values incl. NA, or NULL;
+#                 NOTE: a wrong value causes undefined behaviour (see docs above)
+#   method        low-level method name or NULL for automatic selection
+unipos.integer64 <- function(x
+, incomparables = FALSE  # dummy parameter
+, order = c("original","values","any")
+, nunique = NULL
+, method = NULL
+, ...
+){
+  stopifnot(identical(incomparables, FALSE))
+  order <- match.arg(order)
+  # cache(x) returns the cache environment attached to x, or NULL if none
+  c <- cache(x)
+  keep.order <- order == "original"
+  # a cached nunique takes precedence over the (possibly NULL) argument
+  if (is.null(nunique) && !is.null(c))
+	nunique <- c$nunique
+  # automatic method selection: pick the cheapest low-level routine given
+  # the requested order and which cached structures (hashmap/sort/order) exist
+  if (is.null(method)){
+    if (is.null(c)){
+		if (order=="values")
+			method <- "sortorderupo"
+		else
+			method <- "hashmapupo"
+	}else{
+		switch(order
+		, "original" = {
+			if (exists("hashmap", envir=c, inherits=FALSE))
+				method <- "hashupo"
+			else if (exists("order", envir=c, inherits=FALSE)){
+				if (exists("sort", envir=c, inherits=FALSE))
+					method <- "sortorderupo"
+				else
+					method <- "orderupo"
+			}else
+				method <- "hashmapupo"
+		}
+		, "values" = {
+			if (exists("order", envir=c, inherits=FALSE)){
+				if (exists("sort", envir=c, inherits=FALSE))
+					method <- "sortorderupo"
+				else
+					method <- "orderupo"
+			}else if (exists("hashmap", envir=c, inherits=FALSE) && c$nunique<length(x)/2)
+				method <- "hashupo"
+			else
+				method <- "sortorderupo"
+		}
+		, "any" = {
+			if (exists("sort", envir=c, inherits=FALSE) && exists("order", envir=c, inherits=FALSE))
+				method <- "sortorderupo"
+			else if (exists("hashmap", envir=c, inherits=FALSE))
+				method <- "hashupo"
+			else if (exists("order", envir=c, inherits=FALSE))
+				method <- "orderupo"
+			else
+				method <- "sortorderupo"
+		}
+		)
+	}
+  }
+  # execute the selected low-level method; each branch rebuilds the
+  # auxiliary structures it needs when they are not already cached
+  switch(method
+  , hashmapupo={
+		p <- hashmapupo(x, nunique=nunique)
+    }
+  , hashupo={
+		if (is.null(c) || !exists("hashmap", envir=c, inherits=FALSE))
+			h <- hashmap(x, nunique=nunique)
+		else
+			h <- c
+		p <- hashupo(h, keep.order=keep.order)
+		if (order=="values"){
+			# hash order is quasi random: sort the positions by their values
+			# (ramsortorder sorts s and p in place)
+			s <- x[p]
+			ramsortorder(s, p, na.last=FALSE)
+		}
+    }
+  , sortorderupo={
+		if (is.null(c) || !exists("sort", c, inherits=FALSE) || !exists("order", c, inherits=FALSE)){
+			s <- clone(x)
+			o <- seq_along(x)
+			ramsortorder(s, o, na.last=FALSE)
+		}else{
+			s <- get("sort", c, inherits=FALSE)
+			o <- get("order", c, inherits=FALSE)
+		}
+		if (is.null(nunique))
+			nunique <- sortnut(s)[1]
+		p <- sortorderupo(s, o, nunique, keep.order=keep.order)
+    }
+  , orderupo={
+		if (is.null(c) || !exists("order", c, inherits=FALSE)){
+			o <- seq_along(x)
+			ramorder(x, o, na.last=FALSE)
+		}else{
+			o <- get("order", c, inherits=FALSE)
+		}
+		if (is.null(nunique))
+			nunique <- ordernut(x, o)[1]
+		p <- orderupo(x, o, nunique, keep.order=keep.order)
+    }
+  , stop("unknown method", method)
+  )
+  p
+}
+
+
+
+#! \name{table.integer64}
+#! \title{Cross Tabulation and Table Creation for integer64}
+#! \alias{table.integer64}
+#! 
+#! \concept{counts}
+#! \concept{frequencies}
+#! \concept{occurrences}
+#! \concept{contingency table}
+#! 
+#! \description{
+#!   \code{table.integer64} uses the cross-classifying integer64 vectors to build a contingency
+#!   table of the counts at each combination of vector values.
+#! }
+#! \usage{
+#! table.integer64(\dots
+#! , return = c("table","data.frame","list")
+#! , order = c("values","counts")
+#! , nunique = NULL
+#! , method = NULL
+#! , dnn = list.names(...), deparse.level = 1
+#! ) 
+#! }
+#! \arguments{
+#!   \item{\dots}{one or more objects which can be interpreted as factors
+#!     (including character strings), or a list (or data frame) whose
+#!     components can be so interpreted.  (For \code{as.table} and
+#!     \code{as.data.frame}, arguments passed to specific methods.)}
+#!   \item{nunique}{
+#! 	NULL or the number of unique values of table (including NA). Providing \code{nunique} can speed-up matching when \code{table} has no cache. Note that a wrong nunique can cause undefined behaviour up to a crash.
+#! }
+#!   \item{order}{
+#! 	By default results are created sorted by "values", or by "counts"
+#! }
+#!   \item{method}{
+#! 	NULL for automatic method selection or a suitable low-level method, see details
+#! }
+#!   \item{return}{
+#!      choose the return format, see details
+#! }
+#!   \item{dnn}{the names to be given to the dimensions in the result (the
+#!     \emph{dimnames names}).}
+#!   \item{deparse.level}{controls how the default \code{dnn} is
+#!     constructed.  See \sQuote{Details}.}
+#! }
+#! \details{
+#!   This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+#!   Suitable methods are \code{\link{hashmaptab}} (simultaneously creating and using a hashmap)
+#! , \code{\link{hashtab}} (first creating a hashmap then using it)
+#! , \code{\link{sortordertab}} (fast ordering) 
+#! and \code{\link{ordertab}} (memory saving ordering).
+#! \cr
+#!   If the argument \code{dnn} is not supplied, the internal function
+#!   \code{list.names} is called to compute the \sQuote{dimname names}.  If the
+#!   arguments in \code{\dots} are named, those names are used.  For the
+#!   remaining arguments, \code{deparse.level = 0} gives an empty name,
+#!   \code{deparse.level = 1} uses the supplied argument if it is a symbol,
+#!   and \code{deparse.level = 2} will deparse the argument.
+#! 
+#!   Arguments \code{exclude}, \code{useNA}, are not supported, i.e. \code{NA}s are always tabulated, and, different from \code{\link{table}} they are sorted first if \code{order="values"}. 
+#! }
+#! \value{
+#!   By default (with \code{return="table"}) \code{\link{table}} returns a \emph{contingency table}, an object of
+#!   class \code{"table"}, an array of integer values. Note that unlike S the result is always an array, a 1D array if one factor is given. Note also that for multidimensional arrays this is a \emph{dense} return structure which can dramatically increase RAM requirements (for large arrays with high mutual information, i.e. many possible input combinations of which only few occur) and that \code{\link{table}} is limited to \code{2^31} possible combinations (e.g. two input vectors with 463 [...]
+#!   \cr
+#!   You can use the other \code{return=} options to cope with these problems, the potential combination limit is increased from \code{2^31} to \code{2^63} with these options, RAM is only required for observed combinations and string conversion is avoided. 
+#!   \cr
+#!   With \code{return="data.frame"} you get a \emph{dense} representation as a \code{\link{data.frame}} (like that resulting from \code{as.data.frame(table(...))}) where only observed combinations are listed (each as a data.frame row) with the corresponding frequency counts (the latter as component
+#!   named by \code{responseName}).  This is the inverse of \code{\link{xtabs}}..
+#!   \cr
+#!   With \code{return="list"} you also get a \emph{dense} representation as a simple \code{\link{list}} with components 
+#!   \item{values }{a integer64 vector of the technically tabulated values, for 1D this is the tabulated values themselves, for kD these are the values representing the potential combinations of input values}
+#!   \item{counts}{the frequency counts}
+#!   \item{dims}{only for kD: a list with the vectors of the unique values of the input dimensions}
+#! }
+#! \note{
+#!   Note that by using \code{\link{as.integer64.factor}} we can also input 
+#!   factors into \code{table.integer64} -- only the \code{\link{levels}} get lost.
+#!  \cr
+#!   Note that because of the existence of \code{\link{as.factor.integer64}} 
+#! the standard \code{\link{table}} function -- within its limits -- can also be used 
+#! for \code{\link{integer64}}, and especially for combining \code{\link{integer64}} input 
+#! with other data types.
+#! }
+#! \seealso{
+#!   \code{\link{table}} for more info on the standard version coping with Base R's data types, \code{\link{tabulate}} which can faster tabulate \code{\link{integer}s} with a limited range \code{[1L .. nL not too big]}, \code{\link{unique.integer64}} for the unique values without counting them and \code{\link{unipos.integer64}} for the positions of the unique values. 
+#! }
+#! \examples{
+#! message("pure integer64 examples")
+#! x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#! y <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#! z <- sample(c(rep(NA, 9), letters), 32, TRUE)
+#! table.integer64(x)
+#! table.integer64(x, order="counts")
+#! table.integer64(x, y)
+#! table.integer64(x, y, return="data.frame")
+#!
+#! message("via as.integer64.factor we can use 'table.integer64' also for factors")
+#! table.integer64(x, as.integer64(as.factor(z)))
+#! 
+#! message("via as.factor.integer64 we can also use 'table' for integer64")
+#! table(x)
+#! table(x, exclude=NULL)
+#! table(x, z, exclude=NULL)
+#!
+#! \dontshow{
+#!  stopifnot(identical(table.integer64(as.integer64(c(1,1,2))), table(c(1,1,2))))
+#!  stopifnot(identical(table.integer64(as.integer64(c(1,1,2)),as.integer64(c(3,4,4))), table(c(1,1,2),c(3,4,4))))
+#!  message("the following works with three warnings due to coercion")
+#!  stopifnot(identical(table.integer64(c(1,1,2)), table(c(1,1,2))))
+#!  stopifnot(identical(table.integer64(as.integer64(c(1,1,2)),c(3,4,4)), table(c(1,1,2),c(3,4,4))))
+#!  stopifnot(identical(table.integer64(c(1,1,2),as.integer64(c(3,4,4))), table(c(1,1,2),c(3,4,4))))
+#!  message("the following works because of as.factor.integer64")
+#!  stopifnot(identical(table(as.integer64(c(1,1,2))), table(c(1,1,2))))  
+#!  stopifnot(identical(table(as.integer64(c(1,1,2)),as.integer64(c(3,4,4))), table(c(1,1,2),c(3,4,4))))
+#!  stopifnot(identical(table(as.integer64(c(1,1,2)),c(3,4,4)), table(c(1,1,2),c(3,4,4))))
+#!  stopifnot(identical(table(c(1,1,2),as.integer64(c(3,4,4))), table(c(1,1,2),c(3,4,4))))
+#! }
+#!
+#! }
+#! \keyword{category}
+
+
+# Cross tabulation for one or more integer64 vectors.
+# Multiple inputs are combined into a single integer64 key vector
+# (mixed-radix encoding via the cumulative products in d), which is then
+# tabulated with one of the low-level methods; the result is returned as
+# a "table", a data.frame or a plain list (see 'return' argument).
+table.integer64 <- function(
+  ...
+, return = c("table","data.frame","list")
+, order = c("values","counts")
+, nunique = NULL
+, method = NULL
+, dnn = list.names(...), deparse.level = 1
+){
+  order <- match.arg(order)
+  return <- match.arg(return)
+  # this is taken from 'table'
+	list.names <- function(...){
+        l <- as.list(substitute(list(...)))[-1L]
+        nm <- names(l)
+        fixup <- if (is.null(nm)) 
+            seq_along(l)
+        else nm == ""
+        dep <- vapply(l[fixup], function(x) switch(deparse.level + 
+            1, "", if (is.symbol(x)) as.character(x) else "", 
+            deparse(x, nlines = 1)[1L]), "")
+        if (is.null(nm)) 
+            dep
+        else {
+            nm[fixup] <- dep
+            nm
+        }
+    }  
+
+	# COPY ON MODIFY is broken for reading from list(...)
+	# because list(...) creates a copy of all ... and this invalidates our caches
+	# therefore we go this sick workaround
+	argsymbols <- as.list(substitute(list(...)))[-1L]
+	argframe <- parent.frame()
+	A <- function(i)eval(argsymbols[[i]], argframe)
+	N <- length(argsymbols)
+	if (!N) 
+		stop("nothing to tabulate")
+	# a single list argument is unpacked into its components
+	if (N == 1L && is.list(A(1L))){
+		args <- A(1L)
+		if (length(dnn) != length(args)) 
+            dnn <- if (!is.null(argn <- names(args))) argn
+				else paste(dnn[1L], seq_along(args), sep = ".")		
+		N <- length(args)
+		A <- function(i)args[[i]]
+	}
+	force(dnn)
+		
+	if (N==1L){
+		# single input: tabulate x directly (after coercion if needed)
+		x <- A(1L)
+			if (!is.integer64(x)){
+				warning("coercing first argument to integer64")
+				x <- as.integer64(x)
+			}
+	}else{
+		# multiple inputs: encode each combination of input values as a
+		# single integer64 key; d[[i]] is the radix offset of dimension i
+		a <- A(1L)
+		n <- length(a)
+		nu <- integer(N)
+		d <- integer64(N+1); d[[1]] <- 1L
+		dims <- vector("list", N)
+		names(dims) <- dnn
+		for (i in 1:N){
+			a <- A(i)
+			if (length(a) != n) 
+				stop("all input vectors must have the same length")
+			if (!is.integer64(a)){
+				warning("coercing argument ", i, " to integer64")
+				a <- as.integer64(a)
+			}
+			# reuse cached sort/order where available, else sort a clone in place
+			c <- cache(a)
+			if (is.null(c$order)){
+				s <- clone(a)
+				o <- seq_along(s)
+				ramsortorder(s,o)
+				nu[[i]] <- sortnut(s)[["nunique"]]
+			}else if (is.null(c$sort)){
+				o <- c$order
+				s <- a[o]
+				nu[[i]] <- c$nunique
+			}else{
+				o <- c$order
+				s <- c$sort
+				nu[[i]] <- c$nunique
+			}
+			d[[i+1]] <- d[[i]] * nu[[i]]
+			# integer64 overflow shows up as NA: too many hypothetical combinations
+			if (is.na(d[[i+1]]))
+				stop("attempt to make a table from more than >= 2^63 hypothetical combinations")
+			dims[[i]] <- sortuni(s, nu[[i]])
+			if (i==1L)
+				x <- sortorderkey(s,o) - 1L
+			else
+				x <- x + d[[i]] * (sortorderkey(s,o) - 1L)
+		}
+	}
+  # tabulate the (combined) key vector x
+  c <- cache(x)
+  if (is.null(nunique) && !is.null(c))
+	nunique <- c$nunique
+  # automatic method selection based on requested order and cache contents
+  if (is.null(method)){
+    if (is.null(c)){
+		if (order=="values" && (is.null(nunique) || nunique>65536))
+			method <- "sorttab"
+		else
+			method <- "hashmaptab"
+	}else{
+		if (order=="values"){
+			if (exists("sort", envir=c, inherits=FALSE))
+				method <- "sorttab"
+			else if (exists("hashmap", envir=c, inherits=FALSE) && c$nunique<sqrt(length(x)))
+				method <- "hashtab"
+			else if (exists("order", envir=c, inherits=FALSE))
+				method <- "ordertab"
+			else
+				method <- "sorttab"
+		}else{ # order = "counts"
+			if (exists("hashmap", envir=c, inherits=FALSE))
+				method <- "hashtab"
+			else if (exists("sort", envir=c, inherits=FALSE))
+				method <- "sorttab"
+			else if (exists("order", envir=c, inherits=FALSE))
+				method <- "ordertab"
+			else
+				method <- "hashmaptab"
+		}
+	}
+  }
+  # each branch yields 'val' (unique values) and 'cnt' (their counts)
+  switch(method
+  , hashmaptab={
+		tmp <- hashmaptab(x, nunique=nunique)
+		cnt <- tmp$counts
+		val <- tmp$values
+		rm(tmp)
+    }
+  , hashtab={
+		if (is.null(c) || !exists("hashmap", envir=c, inherits=FALSE))
+			h <- hashmap(x, nunique=nunique)
+		else
+			h <- c 
+		tmp <- hashtab(h, keep.order=FALSE)
+		cnt <- tmp$counts
+		val <- tmp$values
+		rm(tmp)
+    }
+  , sorttab={
+		if (is.null(c) || !exists("sort", c, inherits=FALSE)){
+			s <- clone(x)
+			ramsort(s, na.last=FALSE)
+		}else{
+			s <- get("sort", c, inherits=FALSE)
+		}
+		if (is.null(nunique))
+			nunique <- sortnut(s)[1]
+		val <- sortuni(s, nunique)
+		cnt <- sorttab(s, nunique)
+    }
+  , ordertab={
+		if (is.null(c) || !exists("order", c, inherits=FALSE)){
+			o <- seq_along(x)
+			ramorder(x, o, na.last=FALSE)
+		}else{
+			o <- get("order", c, inherits=FALSE)
+		}
+		if (is.null(nunique))
+			nunique <- ordernut(x, o)[1]
+		val <- orderuni(x, o, nunique, keep.order=FALSE)
+		cnt <- ordertab(x, o, nunique, keep.order=FALSE)
+		rm(o)
+    }
+  , stop("unknown method", method)
+  )
+	# bring (val, cnt) into the requested order
+	if (order=="values"){
+		if (substr(method, 1, 4)=="hash"){
+			o <- seq_along(val)
+			ramsortorder(val, o, na.last=FALSE)
+			cnt <- cnt[o]
+		}
+	}else{
+		# xx workaround until we have implemented ramsort.integer
+		o <- sort.list(cnt, na.last=NA, method="quick")
+		cnt <- cnt[o]
+		# o <- seq_along(cnt)
+		# ramsortorder(cnt, o, na.last=FALSE)
+		val <- val[o]
+	}
+  ## attaching names is extremely expensive with many unique values, doing this only for compatibility with 'table' here
+  switch(return
+  ,  "table" = {
+  		if (N==1){
+			attr(cnt, "dim") <-length(cnt)
+			dn <- list(as.character(val))
+			names(dn) <- dnn[1]
+			attr(cnt, "dimnames") <- dn
+		}else{
+			# dense k-dimensional array; val holds the mixed-radix key
+			a <- array(0L, dim=nu, dimnames=lapply(dims, as.character))
+			a[as.integer(val)+1L] <- as.integer(cnt)
+			cnt <- a
+		}
+		oldClass(cnt) <- "table"
+	}
+  ,  "data.frame" = {
+		if (N==1){
+			cnt <- data.frame(values=val, Freq=cnt)
+			names(cnt)[[1]] <- dnn[1]
+		}else{
+			# decode the mixed-radix key back into per-dimension values
+			for (i in N:1){
+				w <- val %/% d[[i]]
+				val <- val - d[[i]]*w
+				dims[[i]] <- dims[[i]][as.integer(w)+1L]
+			}
+			cnt <- data.frame(dims, Freq=cnt)
+		}
+	}
+  , "list" = {
+		if (N==1)
+			cnt <- list(values=val, counts=cnt)
+		else
+			cnt <- list(values=val, counts=cnt, dims=dims)
+    }
+  )
+  cnt
+}
+
+
+# Coerce an integer64 vector to factor.
+# Levels are the sorted unique values; integer codes come from
+# sortorderkey() with NAs skipped via na.skip.num.
+# NOTE(review): nearly identical to as.ordered.integer64 below, which only
+# differs in the class attached to the result.
+as.factor.integer64 <- function(x){
+
+	c <- cache(x)
+	# three cases: no cached order, cached order only, cached order + sort
+	if (is.null(c$order)){
+		s <- clone(x)
+		o <- seq_along(s)
+		na.count <- ramsortorder(s,o)  # sorts s and o in place, returns NA count
+		nu <- sortnut(s)[["nunique"]]
+	}else if (is.null(c$sort)){
+		o <- c$order
+		s <- x[o]
+		na.count <- c$na.count
+		nu <- c$nunique
+	}else{
+		o <- c$order
+		s <- c$sort
+		na.count <- c$na.count
+		nu <- c$nunique
+	}
+	dimtab <- sortuni(s, nu)
+	dimpos <- sortorderkey(s,o,na.skip.num=na.count) - 1L
+	attr(dimpos, "levels") <- dimtab
+	oldClass(dimpos) <- "factor"
+	dimpos
+}
+
+# Coerce an integer64 vector to an ordered factor.
+# Same computation as as.factor.integer64, but the result additionally
+# carries class "ordered".
+as.ordered.integer64 <- function(x){
+	cx <- cache(x)
+	if (is.null(cx$order)){
+		# no cached ordering: sort a clone together with its order, in place
+		sorted <- clone(x)
+		ord <- seq_along(sorted)
+		nacnt <- ramsortorder(sorted, ord)
+		nuniq <- sortnut(sorted)[["nunique"]]
+	}else if (is.null(cx$sort)){
+		# cached order but no cached sort: materialize the sorted values
+		ord <- cx$order
+		sorted <- x[ord]
+		nacnt <- cx$na.count
+		nuniq <- cx$nunique
+	}else{
+		# both sort and order are cached
+		ord <- cx$order
+		sorted <- cx$sort
+		nacnt <- cx$na.count
+		nuniq <- cx$nunique
+	}
+	levs <- sortuni(sorted, nuniq)
+	codes <- sortorderkey(sorted, ord, na.skip.num=nacnt) - 1L
+	attr(codes, "levels") <- levs
+	oldClass(codes) <- c("ordered", "factor")
+	codes
+}
+
+# Strip the factor class and convert the integer level codes to integer64.
+as.integer64.factor <- function(x, ...) as.integer64(unclass(x))
+
+
+
+#! \name{keypos}
+#! \alias{keypos}
+#! \alias{keypos.integer64}
+#! \title{Extract Positions in redundant dimension table}
+#! \description{
+#!   \code{keypos} returns the positions of the (fact table) elements that participate in their sorted unique subset (dimension table)
+#! }
+#! \usage{
+#! keypos(x, \dots)
+#! \method{keypos}{integer64}(x, method = NULL, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{a vector or a data frame or an array or \code{NULL}.}
+#!   \item{method}{
+#! 	NULL for automatic method selection or a suitable low-level method, see details
+#! }
+#!   \item{\dots}{ignored}
+#! }
+#! \details{
+#!   NAs are sorted first in the dimension table, see \code{\link{ramorder.integer64}}.
+#!   \cr
+#!   This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+#!   Suitable methods are \code{\link{sortorderkey}} (fast ordering) 
+#! and \code{\link{orderkey}} (memory saving ordering).
+#! }
+#! \value{
+#!   an integer vector of the same length as \code{x} containing positions relative to \code{sort(unique(x), na.last=FALSE)}
+#! }
+#! \author{
+#! 	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \seealso{
+#!   \code{\link{unique.integer64}} for the unique subset and \code{\link{match.integer64}} for finding positions in a different vector.
+#! }
+#! \examples{
+#! x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#! keypos(x)
+#! 
+#! stopifnot(identical(keypos(x),  match.integer64(x, sort(unique(x), na.last=FALSE))))
+#! }
+#! \keyword{manip}
+#! \keyword{univar}
+
+
+
+# Generic for keypos(); dispatches on class(x)
+keypos <- function(x, ...) UseMethod("keypos")
+# Positions of the (fact table) elements within their sorted unique
+# subset (dimension table), i.e. match(x, sort(unique(x), na.last=FALSE)).
+# Arguments:
+#   x      integer64 vector
+#   method "sortorderkey", "orderkey" or NULL for automatic selection
+keypos.integer64 <- function(x
+, method = NULL
+, ...
+){
+  c <- cache(x)
+  # automatic method selection: reuse cached order/sort when available
+  if (is.null(method)){
+    if (is.null(c)){
+		method <- "sortorderkey"
+	}else{
+		if (exists("order", envir=c, inherits=FALSE)){
+			if (exists("sort", envir=c, inherits=FALSE))
+				method <- "sortorderkey"
+			else
+				method <- "orderkey"
+		}else
+			method <- "sortorderkey"
+	}
+  }
+  switch(method
+  , sortorderkey={
+		if (is.null(c) || !exists("sort", c, inherits=FALSE) || !exists("order", c, inherits=FALSE)){
+			s <- clone(x)
+			o <- seq_along(x)
+			ramsortorder(s, o, na.last=FALSE)  # sorts s and o in place
+		}else{
+			s <- get("sort", c, inherits=FALSE)
+			o <- get("order", c, inherits=FALSE)
+		}
+		p <- sortorderkey(s, o)
+    }
+  , orderkey={
+		if (is.null(c) || !exists("order", c, inherits=FALSE)){
+			o <- seq_along(x)
+			ramorder(x, o, na.last=FALSE)  # orders o in place, x untouched
+		}else{
+			o <- get("order", c, inherits=FALSE)
+		}
+		p <- orderkey(x, o)
+    }
+  , stop("unknown method ", method)  # fix: separator so the message reads "unknown method foo"
+  )
+  p
+}
+
+#! \name{tiepos}
+#! \alias{tiepos}
+#! \alias{tiepos.integer64}
+#! \title{Extract Positions of Tied Elements}
+#! \description{
+#!   \code{tiepos} returns the positions of those elements that participate in ties.
+#! }
+#! \usage{
+#! tiepos(x, \dots)
+#! \method{tiepos}{integer64}(x, nties = NULL, method = NULL, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{a vector or a data frame or an array or \code{NULL}.}
+#!   \item{nties}{
+#! 	NULL or the number of tied values (including NA). Providing \code{nties} can speed-up when \code{x} has no cache. Note that a wrong nties can cause undefined behaviour up to a crash.
+#! }
+#!   \item{method}{
+#! 	NULL for automatic method selection or a suitable low-level method, see details
+#! }
+#!   \item{\dots}{ignored}
+#! }
+#! \details{
+#!   This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+#!   Suitable methods are \code{\link{sortordertie}} (fast ordering) 
+#! and \code{\link{ordertie}} (memory saving ordering).
+#! }
+#! \value{
+#!   an integer vector of positions
+#! }
+#! \author{
+#! 	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \seealso{
+#!   \code{\link{rank.integer64}} for possibly tied ranks and \code{\link{unipos.integer64}} for positions of unique values.
+#! }
+#! \examples{
+#! x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#! tiepos(x)
+#! 
+#! stopifnot(identical(tiepos(x),  (1:length(x))[duplicated(x) | rev(duplicated(rev(x)))]))
+#! }
+#! \keyword{manip}
+#! \keyword{univar}
+
+
+# Generic for tiepos(); dispatches on class(x)
+tiepos <- function(x, ...) UseMethod("tiepos")
+# Positions of all elements of x that participate in ties (integer64 method).
+# Arguments:
+#   x      integer64 vector
+#   nties  known number of tied values incl. NA, or NULL
+#          (a wrong value causes undefined behaviour, see docs above)
+#   method "sortordertie", "ordertie" or NULL for automatic selection
+tiepos.integer64 <- function(x
+, nties = NULL
+, method = NULL
+, ...
+){
+  c <- cache(x)
+  # a cached nties takes precedence over the (possibly NULL) argument
+  if (is.null(nties) && !is.null(c))
+	nties <- c$nties
+  # automatic method selection: reuse cached order/sort when available
+  if (is.null(method)){
+    if (is.null(c)){
+		method <- "sortordertie"
+	}else{
+		if (exists("order", envir=c, inherits=FALSE)){
+			if (exists("sort", envir=c, inherits=FALSE))
+				method <- "sortordertie"
+			else
+				method <- "ordertie"
+		}else
+			method <- "sortordertie"
+	}
+  }
+  switch(method
+  , sortordertie={
+		if (is.null(c) || !exists("sort", c, inherits=FALSE) || !exists("order", c, inherits=FALSE)){
+			s <- clone(x)
+			o <- seq_along(x)
+			ramsortorder(s, o, na.last=FALSE)
+		}else{
+			s <- get("sort", c, inherits=FALSE)
+			o <- get("order", c, inherits=FALSE)
+		}
+		if (is.null(nties))
+			nties <- sortnut(s)[2]
+		p <- sortordertie(s, o, nties)
+    }
+  , ordertie={
+		if (is.null(c) || !exists("order", c, inherits=FALSE)){
+			o <- seq_along(x)
+			ramorder(x, o, na.last=FALSE)
+		}else{
+			o <- get("order", c, inherits=FALSE)
+		}
+		if (is.null(nties))
+			nties <- ordernut(x, o)[2]
+		p <- ordertie(x, o, nties)
+    }
+  , stop("unknown method", method)
+  )
+  p
+}
+
+
+#! \name{rank.integer64}
+#! \alias{rank.integer64}
+#! \title{Sample Ranks from integer64}
+#! \description{
+#!   Returns the sample ranks of the values in a vector.  Ties (i.e., equal
+#!   values) are averaged and missing values propagated.
+#! }
+#! \usage{
+#! 	\method{rank}{integer64}(x, method = NULL, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{a integer64 vector}
+#!   \item{method}{
+#! 	NULL for automatic method selection or a suitable low-level method, see details
+#! }
+#!   \item{\dots}{ignored}
+#! }
+#! \details{
+#!   This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+#!   Suitable methods are \code{\link{sortorderrnk}} (fast ordering) 
+#! and \code{\link{orderrnk}} (memory saving ordering).
+#! }
+#! \value{
+#!   A numeric vector of the same length as \code{x}.
+#! }
+#! \author{
+#! 	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \seealso{
+#!   \code{\link{order.integer64}}, \code{\link{rank}} and \code{\link{prank}} for percent rank.
+#! }
+#! \examples{
+#! x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#! rank.integer64(x)
+#! 
+#! stopifnot(identical(rank.integer64(x),  rank(as.integer(x)
+#! , na.last="keep", ties.method = "average")))
+#! }
+#! \keyword{univar}
+
+# rank() method for integer64: ties are averaged, NAs are kept (propagated).
+# Arguments:
+#   x      integer64 vector
+#   method "sortorderrnk", "orderrnk" or NULL for automatic selection
+rank.integer64 <- function(x
+, method = NULL
+, ...
+){
+  c <- cache(x)
+  # automatic method selection: reuse cached order/sort when available
+  if (is.null(method)){
+    if (is.null(c)){
+		method <- "sortorderrnk"
+	}else{
+		if (exists("order", envir=c, inherits=FALSE)){
+			if (exists("sort", envir=c, inherits=FALSE))
+				method <- "sortorderrnk"
+			else
+				method <- "orderrnk"
+		}else
+			method <- "sortorderrnk"
+	}
+  }
+  switch(method
+  , sortorderrnk={
+		if (is.null(c) || !exists("sort", c, inherits=FALSE) || !exists("order", c, inherits=FALSE)){
+			s <- clone(x)
+			o <- seq_along(x)
+			na.count <- ramsortorder(s, o, na.last=FALSE)  # sorts in place, returns NA count
+		}else{
+			s <- get("sort", c, inherits=FALSE)
+			o <- get("order", c, inherits=FALSE)
+			na.count <- get("na.count", c, inherits=FALSE)
+		}
+		p <- sortorderrnk(s, o, na.count)
+    }
+  , orderrnk={
+		if (is.null(c) || !exists("order", c, inherits=FALSE)){
+			o <- seq_along(x)
+			na.count <- ramorder(x, o, na.last=FALSE)
+		}else{
+			o <- get("order", c, inherits=FALSE)
+			na.count <- get("na.count", c, inherits=FALSE)
+		}
+		p <- orderrnk(x, o, na.count)
+    }
+  , stop("unknown method", method)
+  )
+  p
+}
+
+#! \name{prank}
+#! \alias{prank}
+#! \alias{prank.integer64}
+#! \title{(P)ercent (Rank)s}
+#! \description{
+#! 	Function \code{prank.integer64}  projects the values [min..max] via ranks [1..n] to [0..1]. 
+#! 	\code{\link{qtile.integer64}} is the inverse function of 'prank.integer64' and projects [0..1] to [min..max].
+#! }
+#! \usage{
+#! 	prank(x, \dots)
+#! 	\method{prank}{integer64}(x, method = NULL, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{a integer64 vector}
+#!   \item{method}{
+#! 	NULL for automatic method selection or a suitable low-level method, see details
+#! }
+#!   \item{\dots}{ignored}
+#! }
+#! \details{
+#! 	Function \code{prank.integer64} is based on \code{\link{rank.integer64}}.
+#! }
+#! \value{
+#!   \code{prank} returns a numeric vector of the same length as \code{x}.
+#! }
+#! \author{
+#! 	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \seealso{
+#!   \code{\link{rank.integer64}} for simple ranks and \code{\link{qtile}} for the inverse function quantiles.
+#! }
+#! \examples{
+#! x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#! prank(x)
+#! 
+#! x <- x[!is.na(x)]
+#! stopifnot(identical(x,  unname(qtile(x, probs=prank(x)))))
+#! }
+#! \keyword{univar}
+
+# Generic for prank(); dispatches on class(x)
+prank <- function(x, ...) UseMethod("prank")
+# Percent ranks: project values via ranks [1..n] onto [0..1].
+# With fewer than two valid (non-NA) values the projection is undefined
+# and a vector of NAs is returned.
+prank.integer64 <- function(x
+, method = NULL
+, ...
+)
+{	
+	nv <- nvalid(x)
+	if (nv < 2L)
+		rep(as.integer64(NA), length(x))
+	else
+		(rank.integer64(x, method=method, ...) - 1L) / (nv - 1L)
+}
+
+#! \name{qtile}
+#! \alias{qtile}
+#! \alias{qtile.integer64}
+#! \alias{quantile.integer64}
+#! \alias{median.integer64}
+#! \alias{mean.integer64}
+#! \alias{summary.integer64}
+#! \title{(Q)uan(Tile)s }
+#! \description{
+#! 	Function \code{\link{prank.integer64}}  projects the values [min..max] via ranks [1..n] to [0..1]. 
+#! 	\code{qtile.integer64} is the inverse function of 'prank.integer64' and projects [0..1] to [min..max].
+#! }
+#! \usage{
+#! 	qtile(x, probs=seq(0, 1, 0.25), \dots)
+#! 	\method{qtile}{integer64}(x, probs = seq(0, 1, 0.25), names = TRUE, method = NULL, \dots)
+#! 	\method{quantile}{integer64}(x, probs = seq(0, 1, 0.25), na.rm = FALSE, names = TRUE, type=0L, \dots)
+#!  \method{mean}{integer64}(x, na.rm = FALSE, \dots)
+#! 	\method{summary}{integer64}(object, \dots)
+#!  ## mean(x, na.rm = FALSE, ...)
+#!  ## or
+#!  ## mean(x, na.rm = FALSE)
+#! }
+#! \arguments{
+#!   \item{x}{a integer64 vector}
+#!   \item{object}{a integer64 vector}
+#!   \item{probs}{
+#! 		numeric vector of probabilities with values in [0,1] - possibly containing \code{NA}s
+#! }
+#!   \item{names}{
+#! 	logical; if \code{TRUE}, the result has a \code{names} attribute. Set to \code{FALSE} for speedup with many probs.
+#! }
+#!   \item{type}{
+#! 	an integer selecting the quantile algorithm, currently only 0 is supported, see details
+#! }
+#!   \item{method}{
+#! 	NULL for automatic method selection or a suitable low-level method, see details
+#! }
+#!   \item{na.rm}{
+#! 	logical; if \code{TRUE}, any \code{NA} and \code{NaN}'s are removed from \code{x} before the quantiles are computed.
+#! }
+#!   \item{\dots}{ignored}
+#! }
+#! \details{
+#!  Functions \code{quantile.integer64} with \code{type=0} and \code{median.integer64} are convenience wrappers to \code{qtile}.
+#!  \cr
+#!	Function \code{qtile} behaves very similar to \code{quantile.default} with \code{type=1} 
+#!  in that it only returns existing values, it is mostly symmetric 
+#!  but it is using 'round' rather than 'floor'. 
+#!  \cr
+#!  Note that this implies that \code{median.integer64} does not interpolate for even number of values 
+#! (interpolation would create values that could not be represented as 64-bit integers).
+#!  \cr
+#!   This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+#!   Suitable methods are \code{\link{sortqtl}} (fast sorting) 
+#! and \code{\link{orderqtl}} (memory saving ordering).
+#! }
+#! \value{
+#!   \code{prank} returns a numeric vector of the same length as \code{x}.
+#!   \cr
+#!   \code{qtile} returns a vector with elements from \code{x} 
+#!   at the relative positions specified by \code{probs}.
+#! }
+#! \author{
+#! 	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \seealso{
+#!   \code{\link{rank.integer64}} for simple ranks and \code{\link{quantile}} for quantiles.
+#! }
+#! \examples{
+#! x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#! qtile(x, probs=seq(0, 1, 0.25))
+#! quantile(x, probs=seq(0, 1, 0.25), na.rm=TRUE)
+#! median(x, na.rm=TRUE)
+#! summary(x)
+#! 
+#! x <- x[!is.na(x)]
+#! stopifnot(identical(x,  unname(qtile(x, probs=prank(x)))))
+#! }
+#! \keyword{univar}
+
+# Generic for qtile(); dispatches on class(x)
+qtile <- function(x, probs = seq(0, 1, 0.25), ...) UseMethod("qtile")
+# Quantile-like function that only returns existing values of x ("type 0").
+# Arguments:
+#   probs  probabilities in [0,1]; NA not allowed
+#   names  if TRUE, name the result like quantile() does ("0%", "25%", ...)
+#   method "sortqtl", "orderqtl" or NULL for automatic selection
+qtile.integer64 <- function(x, probs = seq(0, 1, 0.25), names = TRUE, method = NULL, ...){
+	if (any(is.na(probs) | probs<0 | probs>1))
+		stop("p outside [0,1]")
+  c <- cache(x)
+  # automatic method selection: prefer a cached sort, then a cached order
+  if (is.null(method)){
+    if (is.null(c)){
+		method <- "sortqtl"
+	}else{
+		if (exists("sort", envir=c, inherits=FALSE))
+			method <- "sortqtl"
+		else if (exists("order", envir=c, inherits=FALSE))
+			method <- "orderqtl"
+		else
+			method <- "sortqtl"
+	}
+  }
+  switch(method
+  , sortqtl={
+		if (is.null(c) || !exists("sort", c, inherits=FALSE)){
+			s <- clone(x)
+			na.count <- ramsort(s, na.last=FALSE)  # sorts s in place, returns NA count
+		}else{
+			s <- get("sort", c, inherits=FALSE)
+			na.count <- get("na.count", c, inherits=FALSE)
+		}
+		qs <- sortqtl(s, na.count, probs)
+    }
+  , orderqtl={
+		if (is.null(c) || !exists("order", c, inherits=FALSE)){
+			o <- seq_along(x)
+			na.count <- ramorder(x, o, na.last=FALSE)
+		}else{
+			o <- get("order", c, inherits=FALSE)
+			na.count <- get("na.count", c, inherits=FALSE)
+		}
+		qs <- orderqtl(x, o, na.count, probs)
+    }
+  , stop("unknown method ", method)  # fix: separator so the message reads "unknown method foo"
+  )
+  # attach percent names the same way quantile() does
+  if (names){
+	np <- length(probs)
+	dig <- max(2L, getOption("digits"))
+	names(qs) <- paste(if (np < 100) 
+		formatC(100 * probs, format = "fg", width = 1, digits = dig)
+	else format(100 * probs, trim = TRUE, digits = dig), 
+		"%", sep = "")
+  }
+  qs
+}
+
+
+# quantile() method for integer64: only type==0 ('qtile') is supported;
+# NAs must be explicitly removed via na.rm=TRUE.
+quantile.integer64 <- function(x, probs = seq(0, 1, 0.25), na.rm = FALSE, names = TRUE, type=0L, ...){
+	if (type[[1L]] != 0L)
+		stop("only type==0 ('qtile') supported")
+	if (!na.rm && na.count(x) > 0)
+		stop("missing values not allowed with 'na.rm='==FALSE")
+	qtile.integer64(x, probs = probs, na.rm = na.rm, names = names, ...)
+}
+
+
+# adding ... (wish of Kurt Hornik 23.3.2017)
+# median() gained a ... argument only in newer base R; define the method
+# with a matching signature so S3 dispatch works on both old and new R.
+# median.integer64 delegates to qtile with probs=0.5; note it does NOT
+# interpolate for an even number of values (see qtile docs above).
+if (is.na(match("...", names(formals(median))))){
+	median.integer64 <- function(x, na.rm=FALSE){
+		if (!na.rm && na.count(x)>0)
+			stop("missing values not allowed with 'na.rm='==FALSE")
+		qtile.integer64(x, probs = 0.5, na.rm = na.rm, names = FALSE)
+	}
+}else{
+	median.integer64 <- function(x, na.rm=FALSE, ...){
+		if (!na.rm && na.count(x)>0)
+			stop("missing values not allowed with 'na.rm='==FALSE")
+		qtile.integer64(x, probs = 0.5, na.rm = na.rm, names = FALSE)
+	}
+}
+	
+# mean.integer64 <- function(x, na.rm=FALSE){
+	# s <- sum(x, na.rm=na.rm)
+	# if (!is.na(s)){
+		# if (na.rm)
+			# s <- s%/%(length(x)-na.count(x))
+		# else
+			# s <- s%/%length(x)
+	# }
+	# s
+# }
+# mean() method for integer64; the result is truncated to integer64
+# (the accumulation happens in C, see C_mean_integer64).
+mean.integer64 <- function(x, na.rm=FALSE, ...){
+	result <- double(1L)
+	.Call(C_mean_integer64, x, as.logical(na.rm), result)
+	oldClass(result) <- "integer64"
+	result
+}
+
+# summary() method for integer64: quartiles plus mean, and an NA count
+# when any NAs are present.
+summary.integer64 <- function (object, ...){
+	n.na <- na.count(object)
+	q <- quantile(object, na.rm=TRUE)
+	q <- c(q[1L:3L], mean(object, na.rm=TRUE), q[4L:5L])
+	names(q) <- c("Min.", "1st Qu.", "Median", "Mean", "3rd Qu.", "Max.")
+	if (any(n.na))
+		c(q, "NA's" = n.na)
+	else
+		q
+}
diff --git a/R/integer64.R b/R/integer64.R
new file mode 100644
index 0000000..beb512d
--- /dev/null
+++ b/R/integer64.R
@@ -0,0 +1,2443 @@
+# /*
+# R-Code
+# S3 atomic 64bit integers for R
+# (c) 2011 Jens Oehlschlägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+#*/
+
+#! \name{bit64-package}
+#! \alias{bit64-package}
+#! \alias{bit64}
+#! \alias{integer64}
+#! \alias{is.integer64}
+#! \alias{is.integer.integer64}
+#! \alias{is.vector.integer64}
+#! %as.vector.integer64 removed as requested by the CRAN maintainer \alias{as.vector.integer64}
+#! \alias{length<-.integer64}
+#! \alias{print.integer64}
+#! \alias{str.integer64}
+#! \docType{package}
+#! \title{
+#!    A S3 class for vectors of 64bit integers
+#! }
+#! \description{
+#! Package 'bit64' provides fast serializable S3 atomic 64bit (signed) integers 
+#! that can be used in vectors, matrices, arrays and data.frames. Methods are 
+#! available for coercion from and to logicals, integers, doubles, characters  
+#! and factors as well as many elementwise and summary functions. 
+#! \cr
+#! \bold{Version 0.8}
+#! With 'integer64' vectors you can store very large integers at the expense
+#! of 64 bits, which is by factor 7 better than 'int64' from package 'int64'.
+#! Due to the smaller memory footprint, the atomic vector architecture and  
+#! using only S3 instead of S4 classes, most operations are one to three orders 
+#! of magnitude faster: Example speedups are 4x for serialization, 250x for 
+#! adding, 900x for coercion and 2000x for object creation. Also 'integer64' 
+#! avoids an ongoing (potentially infinite) penalty for garbage collection
+#! observed during existence of 'int64' objects (see code in example section). 
+#! \cr
+#! \bold{Version 0.9}
+#! Package 'bit64' - which extends R with fast 64-bit integers - now has fast
+#! (single-threaded) implementations of the most important univariate algorithmic 
+#! operations (those based on hashing and sorting). We now have methods for 
+#! 'match', '%in%', 'duplicated', 'unique', 'table', 'sort', 'order', 'rank', 
+#! 'quantile', 'median' and 'summary'. Regarding data management we also have 
+#! novel generics 'unipos' (positions of the unique values), 'tiepos' (
+#! positions of ties), 'keypos' (positions of foreign keys in a sorted 
+#! dimension table) and derived methods 'as.factor' and 'as.ordered'. This 64-
+#! bit functionality is implemented carefully to be not slower than the 
+#! respective 32-bit operations in Base R and also to avoid outlying waiting 
+#! times observed with 'order', 'rank' and 'table' (speedup factors 20/16/200 
+#! respective). This increases the dataset size with which we can work truly 
+#! interactively. The speed is achieved by simple heuristic optimizers in high-
+#! level functions choosing the best from multiple low-level algorithms and 
+#! further taking advantage of a novel caching if activated. In an example R 
+#! session using a couple of these operations the 64-bit integers performed 22x
+#!  faster than base 32-bit integers, hash-caching improved this to 24x, 
+#! sortorder-caching was most efficient with 38x (caching hashing and sorting 
+#! is not worth it with 32x at duplicated RAM consumption).
+#! }
+#! \usage{
+#!  integer64(length)
+#!  \method{is}{integer64}(x)
+#!  \method{length}{integer64}(x) <- value
+#!  \method{print}{integer64}(x, quote=FALSE, \dots)
+#!  \method{str}{integer64}(object, vec.len  = strO$vec.len, give.head = TRUE, give.length = give.head, \dots)
+#! }
+#! \arguments{
+#!   \item{length}{ length of vector using \code{\link{integer}} }
+#!   \item{x}{ an integer64 vector }
+#!   \item{object}{ an integer64 vector }
+#!   \item{value}{ an integer64 vector of values to be assigned }
+#!   \item{quote}{ logical, indicating whether or not strings should be printed with surrounding quotes. }
+#!   \item{vec.len}{ see \code{\link[utils]{str}} }
+#!   \item{give.head}{ see \code{\link[utils]{str}} }
+#!   \item{give.length}{ see \code{\link[utils]{str}} }
+#!   \item{\dots}{ further arguments to the \code{\link{NextMethod}} }
+#! }
+#! \details{
+#! \tabular{ll}{
+#!    Package: \tab bit64\cr
+#!    Type: \tab Package\cr
+#!    Version: \tab 0.5.0\cr
+#!    Date: \tab 2011-12-12\cr
+#!    License: \tab GPL-2\cr
+#!    LazyLoad: \tab yes\cr
+#!    Encoding: \tab latin1\cr
+#! }
+#! }
+#! \section{Design considerations}{
+#!   64 bit integers are related to big data: we need them to overcome address space limitations. 
+#!   Therefore performance of the 64 bit integer type is critical. 
+#!   In the S language -- designed in 1975 -- atomic objects were defined to be vectors for a couple of good reasons:
+#!   simplicity, option for implicit parallelization, good cache locality. 
+#!   In recent years many analytical databases have learnt that lesson: column based data bases provide superior performance
+#!   for many applications, the result are products such as MonetDB, Sybase IQ, Vertica, Exasol, Ingres Vectorwise.
+#!   If we introduce 64 bit integers not natively in Base R but as an external package, we should at least strive to 
+#!   make them as 'basic' as possible. Therefore the design choice of bit64 not only differs from \code{int64}, it is obvious: 
+#!   Like the other atomic types in Base R, we model data type 'integer64' as a contiguous \code{\link{atomic}} vector in memory, 
+#!   and we use the more basic \code{\link{S3}} class system, not \code{\link{S4}}. Like package \code{int64} we want our 'integer64' to be \code{\link{serialize}able}, 
+#!   therefore we also use an existing data type as the basis. Again the choice is obvious: R has only one 64 bit data type: doubles.
+#!   By using \code{\link{double}s}, \code{integer64} \code{\link{inherits}} some functionality such as \code{\link{is.atomic}}, \code{\link{length}}, 
+#!   \code{\link{length<-}}, \code{\link{names}}, \code{\link{names<-}}, \code{\link{dim}}, \code{\link{dim<-}}, \code{\link{dimnames}}, \code{\link{dimnames}}.
+#!   \cr
+#!   Our R level functions strictly follow the functional programming paradigm: 
+#!   no modification of arguments or other side effects. Before version 0.93  we internally deviated from the strict paradigm
+#!   in order to boost performance. Our C functions do not create new return values, 
+#!   instead we pass-in the memory to be returned as an argument. This gives us the freedom to apply the C-function 
+#!   to new or old vectors, which helps to avoid unnecessary memory allocation, unnecessary copying and unnecessary garbage collection.
+#!   Prior to 0.93 \emph{within} our R functions we also deviated from conventional R programming by not using \code{\link{attr<-}} and \code{\link{attributes<-}} 
+#!   because they always did new memory allocation and copying in older R versions. If we wanted to set attributes of return values that we have freshly created,
+#!   we instead used functions \code{\link[bit]{setattr}} and \code{\link[bit]{setattributes}} from package \code{\link[bit]{bit}}. 
+#!   From version 0.93 \code{\link[bit]{setattr}} is only used for manipulating \code{\link{cache}} objects, in \code{\link{ramsort.integer64}} and \code{\link{sort.integer64}} and in \code{\link{as.data.frame.integer64}}.
+#! }
+#! \section{Arithmetic precision and coercion}{
+#!   The fact that we introduce 64 bit long long integers -- without introducing 128-bit long doubles -- creates some subtle challenges:
+#!   Unlike 32 bit \code{\link{integer}s}, the \code{integer64} are no longer a proper subset of \code{\link{double}}. 
+#!   If a binary arithmetic operation does involve a \code{double} and a \code{integer}, it is a no-brainer to return \code{double} 
+#!   without loss of information. If an \code{integer64} meets a \code{double}, it is not trivial what type to return. 
+#!   Switching to \code{integer64} limits our ability to represent very large numbers, switching to \code{double} limits our ability 
+#!   to distinguish \code{x} from \code{x+1}. Since the latter is the purpose of introducing 64 bit integers, we usually return \code{integer64} 
+#!   from functions involving \code{integer64}, for example in \code{\link[=c.integer64]{c}}, \code{\link[=cbind.integer64]{cbind}} 
+#!   and \code{\link[=rbind.integer64]{rbind}}. 
+#!   \cr
+#!   Different from Base R, our operators \code{\link[=+.integer64]{+}}, 
+#!   \code{\link[=-.integer64]{-}}, \code{\link[=\%/\%.integer64]{\%/\%}} and \code{\link[=\%\%.integer64]{\%\%}} coerce their arguments to 
+#!   \code{integer64} and always return \code{integer64}. 
+#!   \cr
+#!   The multiplication operator \code{\link[=*.integer64]{*}} coerces its first argument to \code{integer64} 
+#!   but allows its second argument to be also \code{double}: the second argument is internaly coerced to 'long double' 
+#!   and the result of the multiplication is returned as \code{integer64}. 
+#!   \cr
+#!   The division \code{\link[=/.integer64]{/}} and power \code{\link[=^.integer64]{^}} operators also coerce their first argument to \code{integer64} 
+#!   and coerce internally their second argument to 'long double', they return as \code{double}, like \code{\link[=sqrt.integer64]{sqrt}}, 
+#!   \code{\link[=log.integer64]{log}}, \code{\link[=log2.integer64]{log2}} and \code{\link[=log10.integer64]{log10}} do. 
+#!
+#!  \tabular{ccccccccc}{
+#!   \bold{argument1} \tab \bold{op} \tab \bold{argument2} \tab \bold{->} \tab \bold{coerced1} \tab \bold{op} \tab \bold{coerced2} \tab \bold{->} \tab \bold{result} \cr
+#!   integer64 \tab + \tab double \tab -> \tab integer64 \tab + \tab integer64 \tab -> \tab integer64 \cr
+#!   double \tab + \tab integer64 \tab -> \tab integer64 \tab + \tab integer64 \tab -> \tab integer64 \cr
+#!   integer64 \tab - \tab double \tab -> \tab integer64 \tab - \tab integer64 \tab -> \tab integer64 \cr
+#!   double \tab - \tab integer64 \tab -> \tab integer64 \tab - \tab integer64 \tab -> \tab integer64 \cr
+#!   integer64 \tab \%/\% \tab double \tab -> \tab integer64 \tab \%/\% \tab integer64 \tab -> \tab integer64 \cr
+#!   double \tab \%/\% \tab integer64 \tab -> \tab integer64 \tab \%/\% \tab integer64 \tab -> \tab integer64 \cr
+#!   integer64 \tab \%\% \tab double \tab -> \tab integer64 \tab \%\% \tab integer64 \tab -> \tab integer64 \cr
+#!   double \tab \%\% \tab integer64 \tab -> \tab integer64 \tab \%\% \tab integer64 \tab -> \tab integer64 \cr
+#!   integer64 \tab * \tab double \tab -> \tab integer64 \tab * \tab long double \tab -> \tab integer64 \cr
+#!   double \tab * \tab integer64 \tab -> \tab integer64 \tab * \tab integer64 \tab -> \tab integer64 \cr
+#!   integer64 \tab / \tab double \tab -> \tab integer64 \tab / \tab long double \tab -> \tab double \cr
+#!   double \tab / \tab integer64 \tab -> \tab integer64 \tab / \tab long double \tab -> \tab double \cr
+#!   integer64 \tab ^ \tab double \tab -> \tab integer64 \tab / \tab long double \tab -> \tab double \cr
+#!   double \tab ^ \tab integer64 \tab -> \tab integer64 \tab / \tab long double \tab -> \tab double \cr
+#!  }
+#! }
+#! \section{Creating and testing S3 class 'integer64'}{
+#!   Our creator function \code{integer64} takes an argument \code{length}, creates an atomic double vector of this length,
+#!   attaches an S3 class attribute 'integer64' to it, and that's it. We simply rely on S3 method dispatch and interpret those 
+#!   64bit elements as 'long long int'. 
+#!   \cr
+#!  \code{\link{is.double}} currently returns TRUE for \code{integer64} and might return FALSE in a later release.
+#!  Consider \code{is.double} to have undefined behaviour and do query \code{is.integer64} \emph{before} querying \code{is.double}.
+#! %As a second line of defense against misinterpretation we make \code{\link{is.double}}
+#! %return \code{FALSE} by making it S3 generic and adding a method \code{\link{as.double.integer64}}. 
+#!   The methods \code{\link{is.integer64}} and \code{\link{is.vector}} both return \code{TRUE} for \code{integer64}. 
+#!  Note that we did not patch \code{\link{storage.mode}} and \code{\link{typeof}}, which both continue returning 'double' 
+#!  Like for 32 bit \code{\link{integer}}, \code{\link{mode}} returns 'numeric' and \code{\link{as.double}}) tries coercing to \code{\link{double}}).
+#!  It is likely that 'integer64' becomes a \code{\link[ff]{vmode}} in package \code{\link[ff]{ff}}. 
+#!  \cr
+#!  Further methods for creating \code{integer64} are \code{\link[=range.integer64]{range}} which returns the range of the data type if called without arguments,
+#!  \code{\link[=rep.integer64]{rep}}, \code{\link[=seq.integer64]{seq}}. 
+#!  \cr
+#!  For all available methods on \code{integer64} vectors see the index below and the examples.
+#! }
+#! \section{Index of implemented methods}{
+#! \tabular{rrl}{
+#!    \bold{creating,testing,printing} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{NA_integer64_} \tab \code{\link{NA_integer_}} \tab NA constant \cr
+#!    \code{integer64} \tab \code{\link{integer}} \tab create zero atomic vector \cr
+#!    \code{\link{rep.integer64}} \tab \code{\link{rep}} \tab  \cr
+#!    \code{\link{seq.integer64}} \tab \code{\link{seq}} \tab  \cr
+#!    \code{\link{is.integer64}} \tab \code{\link{is}} \tab  \cr
+#!                                      \tab \code{\link{is.integer}} \tab inherited from Base R \cr
+#!    %\code{\link{is.double.integer64}} \tab \code{\link{is.double}} \tab  \cr
+#!    \code{\link{is.vector.integer64}} \tab \code{\link{is.vector}} \tab  \cr
+#!    \code{\link{identical.integer64}} \tab \code{\link{identical}} \tab  \cr
+#!    \code{\link{length<-.integer64}} \tab \code{\link{length<-}} \tab  \cr
+#!                                      \tab \code{\link{length}} \tab inherited from Base R \cr
+#!                                      \tab \code{\link{names<-}} \tab inherited from Base R \cr
+#!                                      \tab \code{\link{names}} \tab inherited from Base R \cr
+#!                                      \tab \code{\link{dim<-}} \tab inherited from Base R \cr
+#!                                      \tab \code{\link{dim}} \tab inherited from Base R \cr
+#!                                      \tab \code{\link{dimnames<-}} \tab inherited from Base R \cr
+#!                                      \tab \code{\link{dimnames}} \tab inherited from Base R \cr
+#!                                     \tab \code{\link{str}} \tab inherited from Base R, does not print values correctly \cr
+#!    \code{\link{print.integer64}} \tab \code{\link{print}} \tab  \cr
+#!    \code{\link{str.integer64}} \tab \code{\link{str}} \tab  \cr
+#!  \cr
+#!    \bold{coercing to integer64} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{\link{as.integer64}} \tab   \tab generic \cr
+#!    \code{\link{as.integer64.character}} \tab \code{\link{character}} \tab  \cr
+#!    \code{\link{as.integer64.double}} \tab \code{\link{double}} \tab  \cr
+#!    \code{\link{as.integer64.integer}} \tab \code{\link{integer}} \tab  \cr
+#!    \code{\link{as.integer64.integer64}} \tab \code{integer64} \tab  \cr
+#!    \code{\link{as.integer64.logical}} \tab \code{\link{logical}} \tab  \cr
+#!    \code{\link{as.integer64.NULL}} \tab \code{\link{NULL}} \tab  \cr
+#!  \cr
+#!    \bold{coercing from integer64} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{\link{as.bitstring}} \tab \code{\link{as.bitstring}} \tab generic \cr
+#!    \code{\link{as.bitstring.integer64}} \tab  \tab  \cr
+#!    \code{\link{as.character.integer64}} \tab \code{\link{as.character}} \tab  \cr
+#!    \code{\link{as.double.integer64}} \tab \code{\link{as.double}} \tab  \cr
+#!    \code{\link{as.integer.integer64}} \tab \code{\link{as.integer}} \tab  \cr
+#!    \code{\link{as.logical.integer64}} \tab \code{\link{as.logical}} \tab  \cr
+#!    %as.vector.integer64 removed as requested by the CRAN maintainer \code{\link{as.vector.integer64}} \tab \code{\link{as.vector}} \tab  \cr
+#!  \cr
+#!    \bold{data structures} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{\link{c.integer64}} \tab \code{\link{c}} \tab vector concatenate \cr
+#!    \code{\link{cbind.integer64}} \tab \code{\link{cbind}} \tab column bind \cr
+#!    \code{\link{rbind.integer64}} \tab \code{\link{rbind}} \tab row bind \cr
+#!    \code{\link{as.data.frame.integer64}} \tab \code{\link{as.data.frame}} \tab coerce atomic object to data.frame \cr
+#!                                          \tab \code{\link{data.frame}} \tab inherited from Base R since we have coercion \cr
+#!  \cr
+#!    \bold{subscripting} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{\link{[.integer64}} \tab \code{\link{[}} \tab vector and array extract \cr
+#!    \code{\link{[<-.integer64}} \tab \code{\link{[<-}} \tab vector and array assign \cr
+#!    \code{\link{[[.integer64}} \tab \code{\link{[[}} \tab scalar extract \cr
+#!    \code{\link{[[<-.integer64}} \tab \code{\link{[[<-}} \tab scalar assign \cr
+#!  \cr
+#!    \bold{binary operators} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{\link{+.integer64}} \tab \code{\link{+}} \tab returns integer64 \cr
+#!    \code{\link{-.integer64}} \tab \code{\link{-}} \tab returns integer64 \cr
+#!    \code{\link{*.integer64}} \tab \code{\link{*}} \tab returns integer64 \cr
+#!    \code{\link{^.integer64}} \tab \code{\link{^}} \tab returns double \cr
+#!    \code{\link{/.integer64}} \tab \code{\link{/}} \tab returns double \cr
+#!    \code{\link{\%/\%.integer64}} \tab \code{\link{\%/\%}} \tab returns integer64 \cr
+#!    \code{\link{\%\%.integer64}} \tab \code{\link{\%\%}} \tab returns integer64 \cr
+#!  \cr
+#!    \bold{comparison operators} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{\link{==.integer64}} \tab \code{\link{==}} \tab  \cr
+#!    \code{\link{!=.integer64}} \tab \code{\link{!=}} \tab  \cr
+#!    \code{\link{<.integer64}} \tab \code{\link{<}} \tab  \cr
+#!    \code{\link{<=.integer64}} \tab \code{\link{<=}} \tab  \cr
+#!    \code{\link{>.integer64}} \tab \code{\link{>}} \tab  \cr
+#!    \code{\link{>=.integer64}} \tab \code{\link{>=}} \tab  \cr
+#!  \cr
+#!    \bold{logical operators} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{\link{!.integer64}} \tab \code{\link{!}} \tab  \cr
+#!    \code{\link{&.integer64}} \tab \code{\link{&}} \tab  \cr
+#!    \code{\link{|.integer64}} \tab \code{\link{|}} \tab  \cr
+#!    \code{\link{xor.integer64}} \tab \code{\link{xor}} \tab  \cr
+#!  \cr
+#!    \bold{math functions} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{\link{is.na.integer64}} \tab \code{\link{is.na}} \tab returns logical \cr
+#!    \code{\link{format.integer64}} \tab \code{\link{format}} \tab returns character \cr
+#!    \code{\link{abs.integer64}} \tab \code{\link{abs}} \tab returns integer64 \cr
+#!    \code{\link{sign.integer64}} \tab \code{\link{sign}} \tab returns integer64 \cr
+#!    \code{\link{log.integer64}} \tab \code{\link{log}} \tab returns double \cr
+#!    \code{\link{log10.integer64}} \tab \code{\link{log10}} \tab  returns double \cr
+#!    \code{\link{log2.integer64}} \tab \code{\link{log2}} \tab  returns double \cr
+#!    \code{\link{sqrt.integer64}} \tab \code{\link{sqrt}} \tab  returns double \cr
+#!    \code{\link{ceiling.integer64}} \tab \code{\link{ceiling}} \tab dummy returning its argument \cr
+#!    \code{\link{floor.integer64}} \tab \code{\link{floor}} \tab dummy returning its argument \cr
+#!    \code{\link{trunc.integer64}} \tab \code{\link{trunc}} \tab dummy returning its argument \cr
+#!    \code{\link{round.integer64}} \tab \code{\link{round}} \tab dummy returning its argument \cr
+#!    \code{\link{signif.integer64}} \tab \code{\link{signif}} \tab dummy returning its argument \cr
+#!  \cr
+#!    \bold{cumulative functions} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{\link{cummin.integer64}} \tab \code{\link{cummin}} \tab \cr
+#!    \code{\link{cummax.integer64}} \tab \code{\link{cummax}} \tab \cr
+#!    \code{\link{cumsum.integer64}} \tab \code{\link{cumsum}} \tab \cr
+#!    \code{\link{cumprod.integer64}} \tab \code{\link{cumprod}} \tab \cr
+#!    \code{\link{diff.integer64}} \tab \code{\link{diff}} \tab \cr
+#!  \cr
+#!    \bold{summary functions} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{\link{range.integer64}} \tab \code{\link{range}} \tab \cr
+#!    \code{\link{min.integer64}} \tab \code{\link{min}} \tab  \cr
+#!    \code{\link{max.integer64}} \tab \code{\link{max}} \tab  \cr
+#!    \code{\link{sum.integer64}} \tab \code{\link{sum}} \tab  \cr
+#!    \code{\link{mean.integer64}} \tab \code{\link{mean}} \tab  \cr
+#!    \code{\link{prod.integer64}} \tab \code{\link{prod}} \tab  \cr
+#!    \code{\link{all.integer64}} \tab \code{\link{all}} \tab  \cr
+#!    \code{\link{any.integer64}} \tab \code{\link{any}} \tab  \cr
+#!  \cr
+#!    \bold{algorithmically complex functions} \tab \bold{see also}          \tab \bold{description (caching)}  \cr
+#!    \code{\link{match.integer64}} \tab \code{\link{match}} \tab position of x in table (h//o/so) \cr
+#!    \code{\link{\%in\%.integer64}} \tab \code{\link{\%in\%}} \tab is x in table? (h//o/so) \cr
+#!    \code{\link{duplicated.integer64}} \tab \code{\link{duplicated}} \tab is current element duplicate of previous one? (h//o/so) \cr
+#!    \code{\link{unique.integer64}} \tab \code{\link{unique}} \tab (shorter) vector of unique values only (h/s/o/so) \cr
+#!    \code{\link{unipos.integer64}} \tab \code{\link{unipos}} \tab positions corresponding to unique values (h/s/o/so) \cr
+#!    \code{\link{tiepos.integer64}} \tab \code{\link{tiepos}} \tab positions of values that are tied (//o/so) \cr
+#!    \code{\link{keypos.integer64}} \tab \code{\link{keypos}} \tab position of current value in sorted list of unique values (//o/so) \cr
+#!    \code{\link{as.factor.integer64}} \tab \code{\link{as.factor}} \tab convert to (unordered) factor with sorted levels of previous values (//o/so) \cr
+#!    \code{\link{as.ordered.integer64}} \tab \code{\link{as.ordered}} \tab convert to ordered factor with sorted levels of previous values (//o/so) \cr
+#!    \code{\link{table.integer64}} \tab \code{\link{table}} \tab unique values and their frequencies (h/s/o/so) \cr
+#!    \code{\link{sort.integer64}} \tab \code{\link{sort}} \tab sorted vector (/s/o/so) \cr
+#!    \code{\link{order.integer64}} \tab \code{\link{order}} \tab positions of elements that would create sorted vector (//o/so) \cr
+#!    \code{\link{rank.integer64}} \tab \code{\link{rank}} \tab (average) ranks of non-NAs, NAs kept in place (/s/o/so) \cr
+#!    \code{\link{quantile.integer64}} \tab \code{\link{quantile}} \tab (existing) values at specified percentiles (/s/o/so) \cr
+#!    \code{\link{median.integer64}} \tab \code{\link{median}} \tab (existing) value at percentile 0.5 (/s/o/so) \cr
+#!    \code{\link{summary.integer64}} \tab \code{\link{summary}} \tab  (/s/o/so) \cr
+#!  \cr
+#!    \bold{helper functions} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{\link{minusclass}} \tab \code{\link{minusclass}} \tab removing class attribute \cr
+#!    \code{\link{plusclass}} \tab \code{\link{plusclass}} \tab inserting class attribute \cr
+#!    \code{\link{binattr}} \tab \code{\link{binattr}} \tab define binary op behaviour \cr
+#!  \cr
+#!    \bold{tested I/O functions} \tab \bold{see also}          \tab \bold{description} \cr
+#!                                \tab \code{\link{read.table}} \tab inherited from Base R \cr
+#!                                \tab \code{\link{write.table}} \tab inherited from Base R \cr
+#!                                \tab \code{\link{serialize}} \tab inherited from Base R \cr
+#!                                \tab \code{\link{unserialize}} \tab inherited from Base R \cr
+#!                                \tab \code{\link{save}} \tab inherited from Base R \cr
+#!                                \tab \code{\link{load}} \tab inherited from Base R \cr
+#!                                \tab \code{\link{dput}} \tab inherited from Base R \cr
+#!                                \tab \code{\link{dget}} \tab inherited from Base R \cr
+#! }
+#! }
+#! \section{Limitations inherited from implementing 64 bit integers via an external package}{
+#!   \itemize{
+#!     \item \bold{vector size} of atomic vectors is still limited to \code{\link{.Machine}$integer.max}. 
+#!     However, external memory extending packages such as \code{\link[ff]{ff}} or \code{bigmemory} 
+#!     can extend their address space now with \code{integer64}. Having 64 bit integers also help 
+#!     with those not so obvious address issues that arise once we exchange data with SQL databases 
+#!     and datawarehouses, which use big integers as surrogate keys, e.g. on indexed primary key columns.
+#!     This puts R into a relatively strong position compared to certain commercial statistical 
+#!     softwares, which sell database connectivity but neither have the range of 64 bit integers, 
+#!     nor have integers at all, nor have a single numeric data type in their macro-glue-language.
+#!
+#!     \item \bold{literals} such as \code{123LL} would require changes to Base R, up to then we need to write (and call) 
+#!     \code{as.integer64(123L)} or \code{as.integer64(123)} or \code{as.integer64('123')}. 
+#!     Only the latter allows to specify numbers beyond Base R's numeric data types and therefore is the recommended
+#!     way to use -- using only one way may facilitate migrating code to literals at a later stage.
+#!
+#!   }
+#! }
+#! \section{Limitations inherited from Base R, Core team, can you change this?}{
+#!   \itemize{
+#!     \item \bold{\code{\link{identical}}} with default parameters does not distinguish all bit-patterns of doubles. 
+#!     For testing purposes we provide a wrapper \code{\link{identical.integer64}} that will distinguish all bit-patterns.
+#!     It would be desirable to have a single call of \code{\link{identical}} handle both, \code{\link{double}} and \code{integer64}.
+#! 
+#!     \item the \bold{colon} operator \code{\link{:}} officially does not dispatch S3 methods, however, we have made it generic
+#!      \preformatted{
+#!      from <- lim.integer64()[1]
+#!      to <- from+99
+#!      from:to
+#!    }
+#!    As a limitation remains: it will only dispatch at its first argument \code{from} but not at its second \code{to}.
+#! 
+#!     \item \bold{\code{\link{is.double}}} does not dispatch S3 methods. However, we have made it generic 
+#!		and it will return \code{FALSE} on \code{integer64}.
+#!
+#!     \item \bold{\code{\link{c}}} only dispatches \code{\link{c.integer64}} if the first argument is \code{integer64}
+#!     and it does not recursively dispatch the proper method when called with argument \code{recursive=TRUE}
+#!     Therefore \preformatted{
+#!       c(list(integer64,integer64))
+#!     }
+#!      does not work and for now you can only call \preformatted{
+#!        c.integer64(list(x,x))
+#!      }
+#!
+#!     \item \bold{generic binary operators} fail to dispatch *any* user-defined S3 method 
+#!     if the two arguments have two different S3 classes. For example we have two classes 
+#!     \code{\link{bit}} and \code{\link{bitwhich}} sparsely representing boolean vectors 
+#!     and we have methods \code{\link{&.bit}} and \code{\link{&.bitwhich}}. For an expression
+#!     involving both as in \code{ bit & bitwhich}, none of the two methods is dispatched. 
+#!     Instead a standard method is dispatched, which neither handles \code{\link{bit}} 
+#!     nor \code{\link{bitwhich}}. Although it lacks symmetry, the better choice would be to 
+#!     dispatch simply the method of the class of the first argument in case of class conflict. 
+#!     This choice would allow authors of extension packages providing coherent behaviour 
+#!     at least within their contributed classes. But as long as none of the package authors 
+#!     methods is dispatched, he cannot handle the conflicting classes at all.
+#!
+#!     \item \bold{\code{\link{unlist}}} is not generic and if it were, we would face similar problems as with \code{c()}
+#!
+#!     \item \bold{\code{\link{vector}}} with argument \code{mode='integer64'} cannot work without adjustment of Base R
+#!     \item \bold{\code{\link{as.vector}}} with argument \code{mode='integer64'} cannot work without adjustment of Base R
+#!
+#!     \item \bold{\code{\link{is.vector}}} does not dispatch its method \code{\link{is.vector.integer64}}
+#!
+#!     \item \bold{\code{\link{mode<-}}} drops the class 'integer64' which is returned from \code{as.integer64}.
+#!        Also it does not remove an existing class 'integer64' when assigning mode 'integer'. 
+#!
+#!     \item \bold{\code{\link{storage.mode<-}}} does not support external data types such as \code{as.integer64}
+#!
+#!     \item \bold{\code{\link{matrix}}} does drop the 'integer64' class attribute.
+#!
+#!     \item \bold{\code{\link{array}}}  does drop the 'integer64' class attribute. 
+#!            In current R versions (1.15.1) this can be circumvented by activating the function 
+#!						\code{as.vector.integer64} further down this file.
+#!						However, the CRAN maintainer has requested to remove \code{as.vector.integer64}, 
+#! 						even at the price of breaking previously working functionality of the package. 
+#!
+#!     \item \bold{\code{\link{str}}} does not print the values of \code{integer64} correctly
+#!
+#!   }
+#! }
+#! \section{further limitations}{
+#!   \itemize{
+#!     \item \bold{subscripting} non-existing elements and subscripting with \code{NA}s is currently not supported. 
+#!     Such subscripting currently returns \code{9218868437227407266} instead of \code{NA} (the \code{NA} value of the underlying double code).
+#!     Following the full R behaviour here would either destroy performance or require extensive C-coding. 
+#!   }
+#! }
+#! \note{
+#!    \code{integer64} are useful for handling database keys and exact counting in +-2^63.
+#!    Do not use them as replacement for 32bit integers, integer64 are not
+#!    supported for subscripting by R-core and they have different semantics 
+#!    when combined with double. Do understand that \code{integer64} can only be
+#!    useful over \code{double} if we do not coerce it to \code{double}. \cr
+#!   \cr
+#!   While \cr
+#!   integer + double -> double + double -> double \cr
+#!   or \cr
+#!   1L + 0.5 -> 1.5 \cr 
+#!   for additive operations we coerce to \code{integer64} \cr
+#!   integer64 + double ->  integer64 + integer64 -> integer64 \cr
+#!   hence \cr
+#!   as.integer64(1) + 0.5 -> 1LL + 0LL -> 1LL \cr
+#!   \cr
+#!   see section "Arithmetic precision and coercion" above
+#! }
+#! \value{
+#!   \code{integer64} returns a vector of 'integer64', 
+#!    i.e. a vector of \code{\link{double}} decorated with class 'integer64'.
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! Maintainer: Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ package }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \seealso{ \code{\link{integer}} in base R }
+#! \examples{
+#! message("Using integer64 in vector")
+#! x <- integer64(8)    # create 64 bit vector
+#! x
+#! is.atomic(x)         # TRUE
+#! is.integer64(x)      # TRUE
+#! is.numeric(x)        # TRUE
+#! is.integer(x)        # FALSE - debatable
+#! is.double(x)         # FALSE - might change
+#! x[] <- 1:2           # assigned value is recycled as usual
+#! x[1:6]               # subscripting as usual
+#! length(x) <- 13      # changing length as usual
+#! x
+#! rep(x, 2)            # replicate as usual
+#! seq(as.integer64(1), 10)     # seq.integer64 is dispatched on first given argument
+#! seq(to=as.integer64(10), 1)  # seq.integer64 is dispatched on first given argument
+#! seq.integer64(along.with=x)  # or call seq.integer64 directly
+#! # c.integer64 is dispatched only if *first* argument is integer64 ...
+#! x <- c(x,runif(length(x), max=100)) 
+#! # ... and coerces everything to integer64 - including double
+#! x                                   
+#! names(x) <- letters  # use names as usual
+#! x
+#! 
+#! message("Using integer64 in array - note that 'matrix' currently does not work")
+#! message("as.vector.integer64 removed as requested by the CRAN maintainer")
+#! message("as consequence 'array' also does not work anymore")
+#! %y <- array(as.integer64(NA), dim=c(3,4), dimnames=list(letters[1:3], LETTERS[1:4]))
+#! message("we still can create a matrix or array by assigning 'dim'")
+#! y <- rep(as.integer64(NA), 12)
+#! dim(y) <- c(3,4)
+#! dimnames(y) <- list(letters[1:3], LETTERS[1:4])
+#! y["a",] <- 1:2       # assigning as usual
+#! y
+#! y[1:2,-4]            # subscripting as usual
+#! # cbind.integer64 dispatched on any argument and coerces everything to integer64
+#! cbind(E=1:3, F=runif(3, 0, 100), G=c("-1","0","1"), y)
+#! 
+#! message("Using integer64 in data.frame")
+#! str(as.data.frame(x))
+#! str(as.data.frame(y))
+#! str(data.frame(y))
+#! str(data.frame(I(y)))
+#! d <- data.frame(x=x, y=runif(length(x), 0, 100))
+#! d
+#! d$x
+#! 
+#! message("Using integer64 with csv files")
+#! fi64 <- tempfile()
+#! write.csv(d, file=fi64, row.names=FALSE)
+#! e <- read.csv(fi64, colClasses=c("integer64", NA))
+#! unlink(fi64)
+#! str(e)
+#! identical.integer64(d$x,e$x)
+#! 
+#! message("Serializing and unserializing integer64")
+#! dput(d, fi64)
+#! e <- dget(fi64)
+#! identical.integer64(d$x,e$x)
+#! e <- d[,]
+#! save(e, file=fi64)
+#! rm(e)
+#! load(file=fi64)
+#! identical.integer64(d,e)
+#! 
+#! ### A couple of unit tests follow hidden in a dontshow{} directive ###
+#!   \dontshow{
+#! message("Testing identical.integer64")
+#! i64 <- as.double(NA); class(i64) <- "integer64"
+#! stopifnot(identical(unclass(i64-1), unclass(i64+1)))
+#! stopifnot(identical(i64-1, i64+1))
+#! stopifnot(!identical.integer64(i64-1, i64+1))
+#! 
+#! message("Testing dispatch of 'c' method")
+#! stopifnot(identical.integer64(c(integer64(0), NA), as.integer64(NA)))
+#! message("Dispatch on the second argument fails and we want to be notified once that changes")
+#! stopifnot(!identical.integer64(c(NA, integer64(0)), as.integer64(NA)))
+#! 
+#! message("Testing minus and plus")
+#! d64 <- c(-.Machine$double.base^.Machine$double.digits, -.Machine$integer.max, -1, 0, 1, .Machine$integer.max, .Machine$double.base^.Machine$double.digits)
+#! i64 <- as.integer64(d64)
+#! stopifnot(identical.integer64(i64-1+1,i64))
+#! stopifnot(identical.integer64(i64+1-1,i64))
+#! 
+#! message("Testing minus and plus edge cases and 'rev'\n")
+#! stopifnot(identical.integer64(lim.integer64()+1-1, c(lim.integer64()[1], NA)))
+#! stopifnot(identical.integer64(rev(lim.integer64())-1+1, c(lim.integer64()[2], NA)))
+#! 
+#! message("Testing 'range.integer64', multiplication and integer division")
+#! i64 <- integer64(63)
+#! i64[1] <- 1
+#! for (i in 2:63)
+#! 	i64[i] <- 2*i64[i-1]
+#! stopifnot(identical.integer64(i64 * rev(i64), rep(i64[63], 63)))
+#! for (i in 63:2)
+#! 	i64[i-1] <- i64[i]\%/\%2
+#! stopifnot(identical.integer64(i64 * rev(i64), rep(i64[63], 63)))
+#! for (i in 63:2)
+#! 	i64[i-1] <- i64[i]/2
+#! stopifnot(identical.integer64(i64 * rev(i64), rep(i64[63], 63)))
+#! stopifnot(identical.integer64(c( -i64[63] - (i64[63]-1), i64[63]+(i64[63]-1) ), lim.integer64()))
+#! 
+#! stopifnot(identical.integer64(i64[-1]\%/\%2*as.integer64(2), i64[-1]))
+#! stopifnot(identical.integer64(i64[-1]\%/\%2L*as.integer64(2), i64[-1]))
+#! stopifnot(identical.integer64(i64[-1]/2*as.integer64(2), i64[-1]))
+#! stopifnot(identical.integer64(i64[-1]/2*as.integer64(2), i64[-1]))
+#! 
+#! stopifnot(identical.integer64(i64[-63]*2\%/\%2, i64[-63]))
+#! stopifnot(identical.integer64(i64[-63]*2L\%/\%2L, i64[-63]))
+#! stopifnot(identical.integer64(as.integer64(i64[-63]*2/2), i64[-63]))
+#! stopifnot(identical.integer64(as.integer64(i64[-63]*2L/2L), i64[-63]))
+#! 
+#! message("Testing sqrt, power and log")
+#! stopifnot(identical.integer64( as.integer64(sqrt(i64[-1][c(FALSE, TRUE)])*sqrt(i64[-1][c(FALSE, TRUE)])), i64[-1][c(FALSE, TRUE)] ))
+#! 
+#! stopifnot(identical.integer64(as.integer64(2)^(0:62), i64))
+#! stopifnot(identical.integer64(as.integer64(0:62), as.integer64(round(log2(i64)))))
+#! stopifnot(identical.integer64(as.integer64(round(log(as.integer64(2)^(0:62), 2))), as.integer64(0:62)))
+#! stopifnot(identical.integer64(as.integer64(round(log(as.integer64(3)^(0:39), 3))), as.integer64(0:39)))
+#! stopifnot(identical.integer64(as.integer64(round(log(as.integer64(10)^(0:18), 10))), as.integer64(0:18)))
+#! stopifnot(identical.integer64(as.integer64(round(log10(as.integer64(10)^(0:18)))), as.integer64(0:18)))
+#! 
+#! stopifnot(identical.integer64((as.integer64(2)^(1:62))^(1/1:62), as.integer64(rep(2, 62))))
+#! stopifnot(identical.integer64((as.integer64(3)^(1:39))^(1/1:39), as.integer64(rep(3, 39))))
+#! stopifnot(identical.integer64((as.integer64(10)^(1:18))^(1/1:18), as.integer64(rep(10, 18))))
+#! 
+#! message("Testing c and rep")
+#! stopifnot(identical.integer64( as.integer64(rep(1:3, 1:3)), rep(as.integer64(1:3), 1:3)))
+#! stopifnot(identical.integer64( as.integer64(rep(1:3, 3)), rep(as.integer64(1:3), 3)))
+#!  
+#! x <- as.double(c(NA,NA,NA))
+#! class(x) <- "integer64"
+#! x <- x + -1:1
+#! stopifnot(identical.integer64(rep(x, 3), c(x,x,x) ))
+#! stopifnot(identical.integer64(c.integer64(list(x,x,x), recursive=TRUE), c(x,x,x) ))
+#! 
+#! message("Testing seq")
+#! stopifnot(identical.integer64(seq(as.integer64(1), 10, 2), as.integer64(seq(1, 10, 2)) ))
+#! stopifnot(identical.integer64(seq(as.integer64(1), by=2, length.out=5), as.integer64(seq(1, by=2, length.out=5)) ))
+#! stopifnot(identical.integer64(seq(as.integer64(1), by=2, length.out=6), as.integer64(seq(1, by=2, length.out=6)) ))
+#! stopifnot(identical.integer64(seq.integer64(along.with=3:5), as.integer64(seq(along.with=3:5)) ))
+#! stopifnot(identical.integer64(seq(as.integer64(1), to=-9), as.integer64(seq(1, to=-9)) ))
+#! 
+#! message("Testing cbind and rbind")
+#! stopifnot(identical.integer64( cbind(as.integer64(1:3), 1:3), {x <- rep(as.integer64(1:3), 2); dim(x)<-c(3,2);x}))
+#! stopifnot(identical.integer64( rbind(as.integer64(1:3), 1:3), t({x <- rep(as.integer64(1:3), 2); dim(x)<-c(3,2);x})))
+#! 
+#! message("Testing coercion")
+#! stopifnot(identical( as.double(as.integer64(c(NA, seq(0, 9, 0.25)))), as.double(as.integer(c(NA, seq(0, 9, 0.25))))))
+#! stopifnot(identical( as.character(as.integer64(c(NA, seq(0, 9, 0.25)))), as.character(as.integer(c(NA, seq(0, 9, 0.25))))))
+#! stopifnot(identical( as.integer(as.integer64(c(NA, seq(0, 9, 0.25)))), as.integer(c(NA, seq(0, 9, 0.25)))))
+#! stopifnot(identical( as.logical(as.integer64(c(NA, seq(0, 9, 0.25)))), as.logical(as.integer(c(NA, seq(0, 9, 0.25))))))
+#! stopifnot(identical( as.integer(as.integer64(c(NA, FALSE, TRUE))), as.integer(c(NA, FALSE, TRUE))))
+#! stopifnot(identical( as.integer64(as.integer(as.integer64(-9:9))), as.integer64(-9:9)))
+#! stopifnot(identical( as.integer64(as.double(as.integer64(-9:9))), as.integer64(-9:9)))
+#! stopifnot(identical( as.integer64(as.character(as.integer64(-9:9))), as.integer64(-9:9)))
+#! stopifnot(identical( as.integer64(as.character(lim.integer64())), lim.integer64()))
+#!
+#! message("-- testing logical operators --")
+#! stopifnot(identical.integer64(!c(NA, -1:1), !c(as.integer64(NA), -1:1)))
+#! stopifnot(identical.integer64(rep(c(NA, -1:1), 4)&rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))&as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+#! stopifnot(identical.integer64(rep(c(NA, -1:1), 4)|rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))|as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+#! stopifnot(identical.integer64(xor(rep(c(NA, -1:1), 4),rep(c(NA, -1:1), rep(4, 4))), xor(as.integer64(rep(c(NA, -1:1), 4)),as.integer64(rep(c(NA, -1:1), rep(4, 4))))))
+#! 
+#! message("-- testing comparison operators --")
+#! stopifnot(identical.integer64(rep(c(NA, -1:1), 4)==rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))==as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+#! stopifnot(identical.integer64(rep(c(NA, -1:1), 4)!=rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))!=as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+#! stopifnot(identical.integer64(rep(c(NA, -1:1), 4)>rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))>as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+#! stopifnot(identical.integer64(rep(c(NA, -1:1), 4)>=rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))>=as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+#! stopifnot(identical.integer64(rep(c(NA, -1:1), 4)<rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))<as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+#! stopifnot(identical.integer64(rep(c(NA, -1:1), 4)<=rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))<=as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+#!
+#! message("-- testing vector functions --")
+#! stopifnot(identical.integer64( is.na(as.integer64(c(NA, -1:1))), is.na(c(NA, -1:1)) ))
+#! stopifnot(identical.integer64( format(as.integer64(c(NA, -1:1))), format(c(NA, -1:1)) ))
+#! stopifnot(identical.integer64( abs(as.integer64(c(NA, -1:1))), as.integer64(abs(c(NA, -1:1))) ))
+#! stopifnot(identical.integer64( sign(as.integer64(c(NA, -1:1))), as.integer64(sign(c(NA, -1:1))) ))
+#! stopifnot(identical.integer64( ceiling(as.integer64(c(NA, -1:1))), as.integer64(ceiling(c(NA, -1:1))) ))
+#! stopifnot(identical.integer64( floor(as.integer64(c(NA, -1:1))), as.integer64(floor(c(NA, -1:1))) ))
+#! stopifnot(identical.integer64( trunc(as.integer64(c(NA, -1:1))), as.integer64(trunc(c(NA, -1:1))) ))
+#! stopifnot(identical.integer64( signif(as.integer64(c(NA, -1:1))), as.integer64(c(NA, -1:1)) ))
+#!
+#! message("Testing summary functions")
+#! stopifnot(identical(all(as.integer(1)), all(as.integer64(1))))
+#! stopifnot(identical(all(as.integer(0)), all(as.integer64(0))))
+#! stopifnot(identical(all(as.integer(NA)), all(as.integer64(NA))))
+#! stopifnot(identical(all(as.integer(NA), na.rm=TRUE), all(as.integer64(NA), na.rm=TRUE)))
+#! stopifnot(identical(all(as.integer(1), NA), all(as.integer64(1), NA)))
+#! stopifnot(identical(all(as.integer(0), NA), all(as.integer64(0), NA)))
+#! stopifnot(identical(all(as.integer(1), NA, na.rm=TRUE), all(as.integer64(1), NA, na.rm=TRUE)))
+#! stopifnot(identical(all(as.integer(0), NA, na.rm=TRUE), all(as.integer64(0), NA, na.rm=TRUE)))
+#! stopifnot(identical(all(as.integer(c(1, NA))), all(as.integer64(c(1, NA)))))
+#! stopifnot(identical(all(as.integer(c(0, NA))), all(as.integer64(c(0, NA)))))
+#! stopifnot(identical(all(as.integer(c(1, NA)), na.rm=TRUE), all(as.integer64(c(1, NA)), na.rm=TRUE)))
+#! stopifnot(identical(all(as.integer(c(0, NA)), na.rm=TRUE), all(as.integer64(c(0, NA)), na.rm=TRUE)))
+#! 
+#! stopifnot(identical(any(as.integer(1)), any(as.integer64(1))))
+#! stopifnot(identical(any(as.integer(0)), any(as.integer64(0))))
+#! stopifnot(identical(any(as.integer(NA)), any(as.integer64(NA))))
+#! stopifnot(identical(any(as.integer(NA), na.rm=TRUE), any(as.integer64(NA), na.rm=TRUE)))
+#! stopifnot(identical(any(as.integer(1), NA), any(as.integer64(1), NA)))
+#! stopifnot(identical(any(as.integer(0), NA), any(as.integer64(0), NA)))
+#! stopifnot(identical(any(as.integer(1), NA, na.rm=TRUE), any(as.integer64(1), NA, na.rm=TRUE)))
+#! stopifnot(identical(any(as.integer(0), NA, na.rm=TRUE), any(as.integer64(0), NA, na.rm=TRUE)))
+#! stopifnot(identical(any(as.integer(c(1, NA))), any(as.integer64(c(1, NA)))))
+#! stopifnot(identical(any(as.integer(c(0, NA))), any(as.integer64(c(0, NA)))))
+#! stopifnot(identical(any(as.integer(c(1, NA)), na.rm=TRUE), any(as.integer64(c(1, NA)), na.rm=TRUE)))
+#! stopifnot(identical(any(as.integer(c(0, NA)), na.rm=TRUE), any(as.integer64(c(0, NA)), na.rm=TRUE)))
+#! 
+#! stopifnot(identical.integer64(as.integer64(sum(c(2, 3, NA))), sum(as.integer64(c(2, 3, NA)))))
+#! stopifnot(identical.integer64(as.integer64(sum(c(2, 3, NA), na.rm=TRUE)), sum(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(sum(c(2, 3, NA))), sum(as.integer64(c(2, 3, NA)))))
+#! stopifnot(identical.integer64(as.integer64(sum(c(2, 3, NA), na.rm=TRUE)), sum(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(sum(2, 3, NA)), sum(as.integer64(2), 3, NA)))
+#! stopifnot(identical.integer64(as.integer64(sum(2, 3, NA, na.rm=TRUE)), sum(as.integer64(2), 3, NA, na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(sum(2, 3, NA)), sum(as.integer64(2), 3, NA)))
+#! stopifnot(identical.integer64(as.integer64(sum(2, 3, NA, na.rm=TRUE)), sum(as.integer64(2), 3, NA, na.rm=TRUE)))
+#! 
+#! stopifnot(identical.integer64(as.integer64(prod(c(2, 3, NA))), prod(as.integer64(c(2, 3, NA)))))
+#! stopifnot(identical.integer64(as.integer64(prod(c(2, 3, NA), na.rm=TRUE)), prod(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(prod(c(2, 3, NA))), prod(as.integer64(c(2, 3, NA)))))
+#! stopifnot(identical.integer64(as.integer64(prod(c(2, 3, NA), na.rm=TRUE)), prod(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(prod(2, 3, NA)), prod(as.integer64(2), 3, NA)))
+#! stopifnot(identical.integer64(as.integer64(prod(2, 3, NA, na.rm=TRUE)), prod(as.integer64(2), 3, NA, na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(prod(2, 3, NA)), prod(as.integer64(2), 3, NA)))
+#! stopifnot(identical.integer64(as.integer64(prod(2, 3, NA, na.rm=TRUE)), prod(as.integer64(2), 3, NA, na.rm=TRUE)))
+#! 
+#! stopifnot(identical.integer64(as.integer64(min(c(2, 3, NA))), min(as.integer64(c(2, 3, NA)))))
+#! stopifnot(identical.integer64(as.integer64(min(c(2, 3, NA), na.rm=TRUE)), min(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(min(c(2, 3, NA))), min(as.integer64(c(2, 3, NA)))))
+#! stopifnot(identical.integer64(as.integer64(min(c(2, 3, NA), na.rm=TRUE)), min(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(min(2, 3, NA)), min(as.integer64(2), 3, NA)))
+#! stopifnot(identical.integer64(as.integer64(min(2, 3, NA, na.rm=TRUE)), min(as.integer64(2), 3, NA, na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(min(2, 3, NA)), min(as.integer64(2), 3, NA)))
+#! stopifnot(identical.integer64(as.integer64(min(2, 3, NA, na.rm=TRUE)), min(as.integer64(2), 3, NA, na.rm=TRUE)))
+#! 
+#! stopifnot(identical.integer64(as.integer64(max(c(2, 3, NA))), max(as.integer64(c(2, 3, NA)))))
+#! stopifnot(identical.integer64(as.integer64(max(c(2, 3, NA), na.rm=TRUE)), max(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(max(c(2, 3, NA))), max(as.integer64(c(2, 3, NA)))))
+#! stopifnot(identical.integer64(as.integer64(max(c(2, 3, NA), na.rm=TRUE)), max(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(max(2, 3, NA)), max(as.integer64(2), 3, NA)))
+#! stopifnot(identical.integer64(as.integer64(max(2, 3, NA, na.rm=TRUE)), max(as.integer64(2), 3, NA, na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(max(2, 3, NA)), max(as.integer64(2), 3, NA)))
+#! stopifnot(identical.integer64(as.integer64(max(2, 3, NA, na.rm=TRUE)), max(as.integer64(2), 3, NA, na.rm=TRUE)))
+#! 
+#! stopifnot(identical.integer64(as.integer64(range(c(2, 3, NA))), range(as.integer64(c(2, 3, NA)))))
+#! stopifnot(identical.integer64(as.integer64(range(c(2, 3, NA), na.rm=TRUE)), range(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(range(c(2, 3, NA))), range(as.integer64(c(2, 3, NA)))))
+#! stopifnot(identical.integer64(as.integer64(range(c(2, 3, NA), na.rm=TRUE)), range(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(range(2, 3, NA)), range(as.integer64(2), 3, NA)))
+#! stopifnot(identical.integer64(as.integer64(range(2, 3, NA, na.rm=TRUE)), range(as.integer64(2), 3, NA, na.rm=TRUE)))
+#! stopifnot(identical.integer64(as.integer64(range(2, 3, NA)), range(as.integer64(2), 3, NA)))
+#! stopifnot(identical.integer64(as.integer64(range(2, 3, NA, na.rm=TRUE)), range(as.integer64(2), 3, NA, na.rm=TRUE)))
+#! 
+#! message("-- testing cumulative functions --")
+#! stopifnot(identical.integer64(as.integer64(cumsum(c(2, 3, NA, 1, 4))), cumsum(as.integer64(c(2, 3, NA, 1, 4)))))
+#! stopifnot(identical.integer64(as.integer64(cumprod(c(2, 3, NA, 1, 4))), cumprod(as.integer64(c(2, 3, NA, 1, 4)))))
+#! stopifnot(identical.integer64(as.integer64(cummin(c(2, 3, NA, 1, 4))), cummin(as.integer64(c(2, 3, NA, 1, 4)))))
+#! stopifnot(identical.integer64(as.integer64(cummax(c(2, 3, NA, 1, 4))), cummax(as.integer64(c(2, 3, NA, 1, 4)))))
+#! 
+#! message("testing diff")
+#! d64 <- diffinv(rep(.Machine$integer.max, 100), lag=2, differences=2)
+#! i64 <- as.integer64(d64)
+#! identical(diff(d64, lag=2, differences=2), as.double(diff(i64, lag=2, differences=2)))
+#!
+#!   }
+#!
+#!   \dontrun{
+#! message("== Differences between integer64 and int64 ==")
+#! require(bit64)
+#! require(int64)
+#! 
+#! message("-- integer64 is atomic --")
+#! is.atomic(integer64())
+#! #is.atomic(int64())
+#! str(integer64(3))
+#! #str(int64(3))
+#! 
+#! message("-- The following performance numbers are measured under RWin64  --")
+#! message("-- under RWin32 the advantage of integer64 over int64 is smaller --")
+#!
+#! message("-- integer64 needs 7x/5x less RAM than int64 under 64/32 bit OS 
+#! (and twice the RAM of integer as it should be) --")
+#! #as.vector(object.size(int64(1e6))/object.size(integer64(1e6)))
+#! as.vector(object.size(integer64(1e6))/object.size(integer(1e6)))
+#! 
+#! message("-- integer64 creates 2000x/1300x faster than int64 under 64/32 bit OS
+#! (and 3x the time of integer) --")
+#! t32 <- system.time(integer(1e8))
+#! t64 <- system.time(integer64(1e8))
+#! #T64 <- system.time(int64(1e7))*10  # using 1e8 as above stalls our R on an i7 8 GB RAM Thinkpad
+#! #T64/t64
+#! t64/t32
+#! 
+#! i32 <- sample(1e6)
+#! d64 <- as.double(i32)
+#! 
+#! message("-- the following timings are rather conservative since timings
+#!  of integer64 include garbage collection -- due to looped calls")
+#! message("-- integer64 coerces 900x/100x faster than int64 
+#!  under 64/32 bit OS (and 2x the time of coercing to integer) --")
+#! t32 <- system.time(for(i in 1:1000)as.integer(d64))
+#! t64 <- system.time(for(i in 1:1000)as.integer64(d64))
+#! #T64 <- system.time(as.int64(d64))*1000
+#! #T64/t64
+#! t64/t32
+#! td64 <- system.time(for(i in 1:1000)as.double(i32))
+#! t64 <- system.time(for(i in 1:1000)as.integer64(i32))
+#! #T64 <- system.time(for(i in 1:10)as.int64(i32))*100
+#! #T64/t64
+#! t64/td64
+#! 
+#! message("-- integer64 serializes 4x/0.8x faster than int64 
+#!  under 64/32 bit OS (and less than 2x/6x the time of integer or double) --")
+#! t32 <- system.time(for(i in 1:10)serialize(i32, NULL))
+#! td64 <- system.time(for(i in 1:10)serialize(d64, NULL))
+#! i64 <- as.integer64(i32); 
+#! t64 <- system.time(for(i in 1:10)serialize(i64, NULL))
+#! rm(i64); gc()
+#! #I64 <- as.int64(i32); 
+#! #T64 <- system.time(for(i in 1:10)serialize(I64, NULL))
+#! #rm(I64); gc()
+#! #T64/t64
+#! t64/t32
+#! t64/td64
+#! 
+#! 
+#! message("-- integer64 adds 250x/60x faster than int64
+#!  under 64/32 bit OS (and less than 6x the time of integer or double) --")
+#! td64 <- system.time(for(i in 1:100)d64+d64)
+#! t32 <- system.time(for(i in 1:100)i32+i32)
+#! i64 <- as.integer64(i32); 
+#! t64 <- system.time(for(i in 1:100)i64+i64)
+#! rm(i64); gc()
+#! #I64 <- as.int64(i32); 
+#! #T64 <- system.time(for(i in 1:10)I64+I64)*10
+#! #rm(I64); gc()
+#! #T64/t64
+#! t64/t32
+#! t64/td64
+#! 
+#! message("-- integer64 sums 3x/0.2x faster than int64 
+#! (and at about 5x/60X the time of integer and double) --")
+#! td64 <- system.time(for(i in 1:100)sum(d64))
+#! t32 <- system.time(for(i in 1:100)sum(i32))
+#! i64 <- as.integer64(i32); 
+#! t64 <- system.time(for(i in 1:100)sum(i64))
+#! rm(i64); gc()
+#! #I64 <- as.int64(i32); 
+#! #T64 <- system.time(for(i in 1:100)sum(I64))
+#! #rm(I64); gc()
+#! #T64/t64
+#! t64/t32
+#! t64/td64
+#! 
+#! message("-- integer64 diffs 5x/0.85x faster than integer and double
+#! (int64 version 1.0 does not support diff) --")
+#! td64 <- system.time(for(i in 1:10)diff(d64, lag=2L, differences=2L))
+#! t32 <- system.time(for(i in 1:10)diff(i32, lag=2L, differences=2L))
+#! i64 <- as.integer64(i32); 
+#! t64 <- system.time(for(i in 1:10)diff(i64, lag=2L, differences=2L))
+#! rm(i64); gc()
+#! t64/t32
+#! t64/td64
+#! 
+#! 
+#! message("-- integer64 subscripts 1000x/340x faster than int64
+#! (and at the same speed / 10x slower as integer) --")
+#! ts32 <- system.time(for(i in 1:1000)sample(1e6, 1e3))
+#! t32<- system.time(for(i in 1:1000)i32[sample(1e6, 1e3)])
+#! i64 <- as.integer64(i32); 
+#! t64 <- system.time(for(i in 1:1000)i64[sample(1e6, 1e3)])
+#! rm(i64); gc()
+#! #I64 <- as.int64(i32); 
+#! #T64 <- system.time(for(i in 1:100)I64[sample(1e6, 1e3)])*10
+#! #rm(I64); gc()
+#! #(T64-ts32)/(t64-ts32)
+#! (t64-ts32)/(t32-ts32)
+#! 
+#! message("-- integer64 assigns 200x/90x faster than int64
+#! (and 50x/160x slower than integer) --")
+#! ts32 <- system.time(for(i in 1:100)sample(1e6, 1e3))
+#! t32 <- system.time(for(i in 1:100)i32[sample(1e6, 1e3)] <- 1:1e3)
+#! i64 <- as.integer64(i32); 
+#! t64 <- system.time(for(i in 1:100)i64[sample(1e6, 1e3)] <- 1:1e3)
+#! rm(i64); gc()
+#! #I64 <- as.int64(i32); 
+#! #T64 <- system.time(for(i in 1:10)I64[sample(1e6, 1e3)] <- 1:1e3)*10
+#! #rm(I64); gc()
+#! #(T64-ts32)/(t64-ts32)
+#! (t64-ts32)/(t32-ts32)
+#! 
+#! 
+#! tdfi32 <- system.time(dfi32 <- data.frame(a=i32, b=i32, c=i32))
+#! tdfsi32 <- system.time(dfi32[1e6:1,])
+#! fi32 <- tempfile()
+#! tdfwi32 <- system.time(write.csv(dfi32, file=fi32, row.names=FALSE))
+#! tdfri32 <- system.time(read.csv(fi32, colClasses=rep("integer", 3)))
+#! unlink(fi32)
+#! rm(dfi32); gc()
+#! 
+#! i64 <- as.integer64(i32); 
+#! tdfi64 <- system.time(dfi64 <- data.frame(a=i64, b=i64, c=i64))
+#! tdfsi64 <- system.time(dfi64[1e6:1,])
+#! fi64 <- tempfile()
+#! tdfwi64 <- system.time(write.csv(dfi64, file=fi64, row.names=FALSE))
+#! tdfri64 <- system.time(read.csv(fi64, colClasses=rep("integer64", 3)))
+#! unlink(fi64)
+#! rm(i64, dfi64); gc()
+#! 
+#! #I64 <- as.int64(i32); 
+#! #tdfI64 <- system.time(dfI64<-data.frame(a=I64, b=I64, c=I64))
+#! #tdfsI64 <- system.time(dfI64[1e6:1,])
+#! #fI64 <- tempfile()
+#! #tdfwI64 <- system.time(write.csv(dfI64, file=fI64, row.names=FALSE))
+#! #tdfrI64 <- system.time(read.csv(fI64, colClasses=rep("int64", 3)))
+#! #unlink(fI64)
+#! #rm(I64, dfI64); gc()
+#! 
+#! message("-- integer64 coerces 40x/6x faster to data.frame than int64
+#! (and factor 1/9 slower than integer) --")
+#! #tdfI64/tdfi64
+#! tdfi64/tdfi32
+#! message("-- integer64 subscripts from data.frame 20x/2.5x faster than int64
+#!  (and 3x/13x slower than integer) --")
+#! #tdfsI64/tdfsi64
+#! tdfsi64/tdfsi32
+#! message("-- integer64 csv writes about 2x/0.5x faster than int64
+#! (and about 1.5x/5x slower than integer) --")
+#! #tdfwI64/tdfwi64
+#! tdfwi64/tdfwi32
+#! message("-- integer64 csv reads about 3x/1.5x faster than int64
+#! (and about 2x slower than integer) --")
+#! #tdfrI64/tdfri64
+#! tdfri64/tdfri32
+#! 
+#! rm(i32, d64); gc()
+#! 
+#! 
+#! message("-- investigating the impact on garbage collection: --")
+#! message("-- the fragmented structure of int64 messes up R's RAM --")
+#! message("-- and slows down R's garbage collection just by existing --")
+#! 
+#! td32 <- double(21)
+#! td32[1] <- system.time(d64 <- double(1e7))[3]
+#! for (i in 2:11)td32[i] <- system.time(gc(), gcFirst=FALSE)[3]
+#! rm(d64)
+#! for (i in 12:21)td32[i] <- system.time(gc(), gcFirst=FALSE)[3]
+#! 
+#! t64 <- double(21)
+#! t64[1] <- system.time(i64 <- integer64(1e7))[3]
+#! for (i in 2:11)t64[i] <- system.time(gc(), gcFirst=FALSE)[3]
+#! rm(i64)
+#! for (i in 12:21)t64[i] <- system.time(gc(), gcFirst=FALSE)[3]
+#! 
+#! #T64 <- double(21)
+#! #T64[1] <- system.time(I64 <- int64(1e7))[3]
+#! #for (i in 2:11)T64[i] <- system.time(gc(), gcFirst=FALSE)[3]
+#! #rm(I64)
+#! #for (i in 12:21)T64[i] <- system.time(gc(), gcFirst=FALSE)[3]
+#! 
+#! #matplot(1:21, cbind(td32, t64, T64), pch=c("d","i","I"), log="y")
+#! matplot(1:21, cbind(td32, t64), pch=c("d","i"), log="y")
+#!   }
+#!
+#! }
+#! \name{identical.integer64}
+#! \alias{identical.integer64}
+#! \title{
+#!    Identity function for class 'integer64'
+#! }
+#! \description{
+#!   This will discover any deviation between objects containing integer64 vectors. 
+#! }
+#! \usage{
+#!  identical.integer64(x, y, num.eq = FALSE, single.NA = FALSE
+#! , attrib.as.set = TRUE, ignore.bytecode = TRUE)
+#! }
+#! \arguments{
+#!   \item{x}{ atomic vector of class 'integer64' }
+#!   \item{y}{ atomic vector of class 'integer64' }
+#!   \item{num.eq}{ see \code{\link{identical}} }
+#!   \item{single.NA}{ see \code{\link{identical}} }
+#!   \item{attrib.as.set}{ see \code{\link{identical}} }
+#!   \item{ignore.bytecode}{ see \code{\link{identical}} }
+#! }
+#! \details{
+#!   This is simply a wrapper to \code{\link{identical}} with default arguments \code{num.eq = FALSE, single.NA = FALSE}.
+#! }
+#! \value{
+#!   A single logical value, \code{TRUE} or \code{FALSE}, never \code{NA} and never anything other than a single value. 
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \seealso{ \code{\link{==.integer64}} \code{\link{identical}} \code{\link{integer64}}  }
+#! \examples{
+#!   i64 <- as.double(NA); class(i64) <- "integer64"
+#!   identical(i64-1, i64+1)
+#!   identical.integer64(i64-1, i64+1)
+#! }
+
+
+#! \name{as.character.integer64}
+#! \alias{as.character.integer64}
+#! \alias{as.double.integer64}
+#! \alias{as.integer.integer64}
+#! \alias{as.logical.integer64}
+#! \alias{as.bitstring}
+#! \alias{as.bitstring.integer64}
+#! \alias{as.factor.integer64}
+#! \alias{as.ordered.integer64}
+#! \title{
+#!    Coerce from integer64
+#! }
+#! \description{
+#!   Methods to coerce integer64 to other atomic types. 
+#!   'as.bitstring' coerces to a human-readable bit representation (strings of zeroes and ones). 
+#!   The methods \code{\link{format}}, \code{\link{as.character}}, \code{\link{as.double}},
+#!   \code{\link{as.logical}}, \code{\link{as.integer}} do what you would expect.
+#! }
+#! \usage{
+#!  as.bitstring(x, \dots)
+#!  \method{as.bitstring}{integer64}(x, \dots)
+#!  \method{as.character}{integer64}(x, \dots)
+#!  \method{as.double}{integer64}(x, keep.names = FALSE, \dots)
+#!  \method{as.integer}{integer64}(x, \dots)
+#!  \method{as.logical}{integer64}(x, \dots)
+#!  \method{as.factor}{integer64}(x)
+#!  \method{as.ordered}{integer64}(x)
+#! }
+#! \arguments{
+#!   \item{x}{ an integer64 vector }
+#!   \item{keep.names}{ FALSE, set to TRUE to keep a names vector }
+#!   \item{\dots}{ further arguments to the \code{\link{NextMethod}} }
+#! }
+#! \value{
+#!   \code{as.bitstring} returns a string of zeroes and ones. \cr
+#!   The other methods return atomic vectors of the expected types
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \seealso{ \code{\link{as.integer64.character}} \code{\link{integer64}}  }
+#! \examples{
+#!   as.character(lim.integer64())
+#!   as.bitstring(lim.integer64())
+#! }
+
+#! \name{as.integer64.character}
+#! \alias{as.integer64}
+#! \alias{as.integer64.integer64}
+#! \alias{as.integer64.NULL}
+#! \alias{as.integer64.character}
+#! \alias{as.integer64.double}
+#! \alias{as.integer64.integer}
+#! \alias{as.integer64.logical}
+#! \alias{as.integer64.factor}
+#! \alias{NA_integer64_}
+#! \title{
+#!    Coerce to integer64
+#! }
+#! \description{
+#!   Methods to coerce from other atomic types to integer64. 
+#! }
+#! \usage{
+#!  NA_integer64_
+#!  as.integer64(x, \dots)
+#!  \method{as.integer64}{integer64}(x, \dots)
+#!  \method{as.integer64}{NULL}(x, \dots)
+#!  \method{as.integer64}{character}(x, \dots)
+#!  \method{as.integer64}{double}(x, keep.names = FALSE, \dots)
+#!  \method{as.integer64}{integer}(x, \dots)
+#!  \method{as.integer64}{logical}(x, \dots)
+#!  \method{as.integer64}{factor}(x, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{ an atomic vector }
+#!   \item{keep.names}{ FALSE, set to TRUE to keep a names vector }
+#!   \item{\dots}{ further arguments to the \code{\link{NextMethod}} }
+#! }
+#! \details{
+#!   \code{as.integer64.character} is realized using C function \code{strtoll} which does not support scientific notation. 
+#!   Instead of '1e6' use '1000000'.
+#! }
+#! \value{
+#!   The methods return a vector of class 'integer64'
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \seealso{ \code{\link{as.character.integer64}} \code{\link{integer64}}  }
+#! \examples{
+#!   as.integer64(as.character(lim.integer64()))
+#! }
+
+
+#! \name{extract.replace.integer64}
+#! \alias{[.integer64}
+#! \alias{[[.integer64}
+#! \alias{[[<-.integer64}
+#! \alias{[<-.integer64}
+#! \title{
+#!    Extract or Replace Parts of an integer64 vector
+#! }
+#! \description{
+#!   Methods to extract and replace parts of an integer64 vector.
+#! }
+#! \usage{
+#!  \method{[}{integer64}(x, \dots)
+#!  \method{[}{integer64}(x, \dots) <- value 
+#!  \method{[[}{integer64}(x, \dots)
+#!  \method{[[}{integer64}(x, \dots) <- value
+#! }
+#! \arguments{
+#!   \item{x}{ an atomic vector }
+#!   \item{value}{ an atomic vector with values to be assigned }
+#!   \item{\dots}{ further arguments to the \code{\link{NextMethod}} }
+#! }
+#! \note{
+#!   You should not subscript non-existing elements and not use \code{NA}s as subscripts.
+#!   The current implementation returns \code{9218868437227407266} instead of \code{NA}.
+#! }
+#! \value{
+#!   A vector or scalar of class 'integer64'
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \seealso{ \code{\link{[}} \code{\link{integer64}}  }
+#! \examples{
+#!   as.integer64(1:12)[1:3]
+#!   x <- as.integer64(1:12)
+#!   dim(x) <- c(3,4)
+#!   x
+#!   x[]
+#!   x[,2:3]
+#! }
+
+#! \name{format.integer64}
+#! \alias{format.integer64}
+#! \alias{is.na.integer64}
+#! \alias{is.nan.integer64}
+#! \alias{is.finite.integer64}
+#! \alias{is.infinite.integer64}
+#! \alias{!.integer64}
+#! \alias{sign.integer64}
+#! \alias{abs.integer64}
+#! \alias{sqrt.integer64}
+#! \alias{log.integer64}
+#! \alias{log2.integer64}
+#! \alias{log10.integer64}
+#! \alias{floor.integer64}
+#! \alias{ceiling.integer64}
+#! \alias{trunc.integer64}
+#! \alias{round.integer64}
+#! \alias{signif.integer64}
+#! \alias{scale.integer64}
+#! \title{
+#!    Unary operators and functions for integer64 vectors
+#! }
+#! \description{
+#!   Unary operators and functions for integer64 vectors.
+#! }
+#! \usage{
+#! \method{format}{integer64}(x, justify="right", \dots)
+#! \method{is.na}{integer64}(x)
+#! \method{is.nan}{integer64}(x)
+#! \method{is.finite}{integer64}(x)
+#! \method{is.infinite}{integer64}(x)
+#! \method{!}{integer64}(x)
+#! \method{sign}{integer64}(x)
+#! \method{abs}{integer64}(x)
+#! \method{sqrt}{integer64}(x)
+#! \method{log}{integer64}(x, base)
+#! \method{log2}{integer64}(x)
+#! \method{log10}{integer64}(x)
+#! \method{floor}{integer64}(x)
+#! \method{ceiling}{integer64}(x)
+#! \method{trunc}{integer64}(x, \dots)
+#! \method{round}{integer64}(x, digits=0)
+#! \method{signif}{integer64}(x, digits=6)
+#! \method{scale}{integer64}(x, center = TRUE, scale = TRUE)
+#! }
+#! \arguments{
+#!   \item{x}{ an atomic vector of class 'integer64'}
+#!   \item{base}{ an atomic scalar (we save 50\% log-calls by not allowing a vector base) }
+#!   \item{digits}{ integer indicating the number of decimal places (round) or significant digits (signif) to be used. 
+#!                  Negative values are allowed (see \code{\link{round}}) }
+#!   \item{justify}{ should it be right-justified (the default), left-justified, centred or left alone. }
+#!   \item{center}{ see \code{\link{scale}} }
+#!   \item{scale}{  see \code{\link{scale}} }
+#!   \item{\dots}{ further arguments to the \code{\link{NextMethod}} }
+#! }
+#! \value{
+#!   \code{\link{format}} returns a character vector \cr
+#!   \code{\link{is.na}} and \code{\link{!}} return a logical vector \cr
+#!   \code{\link{sqrt}}, \code{\link{log}}, \code{\link{log2}} and \code{\link{log10}} return a double vector \cr
+#!   \code{\link{sign}}, \code{\link{abs}}, \code{\link{floor}}, \code{\link{ceiling}}, \code{\link{trunc}} and 
+#!   \code{\link{round}} return a vector of class 'integer64' \cr
+#!   \code{\link{signif}} is not implemented 
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \seealso{ \code{\link{xor.integer64}} \code{\link{integer64}}  }
+#! \examples{
+#!   sqrt(as.integer64(1:12))
+#! }
+
+
+#! \name{xor.integer64}
+#! \alias{&.integer64}
+#! \alias{|.integer64}
+#! \alias{xor.integer64}
+#! \alias{!=.integer64}
+#! \alias{==.integer64}
+#! \alias{<.integer64}
+#! \alias{<=.integer64}
+#! \alias{>.integer64}
+#! \alias{>=.integer64}
+#! \alias{+.integer64}
+#! \alias{-.integer64}
+#! \alias{*.integer64}
+#! \alias{^.integer64}
+#! \alias{/.integer64}
+#! \alias{\%/\%.integer64}
+#! \alias{\%\%.integer64}
+#! \alias{binattr}
+#! \title{
+#!    Binary operators for integer64 vectors
+#! }
+#! \description{
+#!   Binary operators for integer64 vectors.
+#! }
+#! \usage{
+#! \method{&}{integer64}(e1,e2)
+#! \method{|}{integer64}(e1,e2)
+#! \method{xor}{integer64}(x,y)
+#! \method{!=}{integer64}(e1,e2)
+#! \method{==}{integer64}(e1,e2)
+#! \method{<}{integer64}(e1,e2)
+#! \method{<=}{integer64}(e1,e2)
+#! \method{>}{integer64}(e1,e2)
+#! \method{>=}{integer64}(e1,e2)
+#! \method{+}{integer64}(e1,e2)
+#! \method{-}{integer64}(e1,e2)
+#! \method{*}{integer64}(e1,e2)
+#! \method{^}{integer64}(e1,e2)
+#! \method{/}{integer64}(e1,e2)
+#! \method{\%/\%}{integer64}(e1,e2)
+#! \method{\%\%}{integer64}(e1,e2)
+#! binattr(e1,e2) # for internal use only
+#! }
+#! \arguments{
+#!   \item{e1}{ an atomic vector of class 'integer64'}
+#!   \item{e2}{ an atomic vector of class 'integer64'}
+#!   \item{x}{ an atomic vector of class 'integer64'}
+#!   \item{y}{ an atomic vector of class 'integer64'}
+#! }
+#! \value{
+#!   \code{\link{&}}, \code{\link{|}}, \code{\link{xor}}, \code{\link{!=}}, \code{\link{==}}, 
+#!   \code{\link{<}}, \code{\link{<=}}, \code{\link{>}}, \code{\link{>=}} return a logical vector \cr
+#!   \code{\link{^}} and \code{\link{/}} return a double vector\cr
+#!   \code{\link{+}}, \code{\link{-}}, \code{\link{*}}, \code{\link{\%/\%}}, \code{\link{\%\%}}
+#!    return a vector of class 'integer64'
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \seealso{ \code{\link{format.integer64}} \code{\link{integer64}}  }
+#! \examples{
+#!   as.integer64(1:12) - 1
+#! }
+
+
+#! \name{sum.integer64}
+#! \alias{all.integer64}
+#! \alias{any.integer64}
+#! \alias{min.integer64}
+#! \alias{max.integer64}
+#! \alias{range.integer64}
+#! \alias{lim.integer64}
+#! \alias{sum.integer64}
+#! \alias{prod.integer64}
+#! \title{
+#!    Summary functions for integer64 vectors
+#! }
+#! \description{
+#!   Summary functions for integer64 vectors. 
+#!   Function 'range' without arguments returns the smallest and largest value of the 'integer64' class.
+#! }
+#! \usage{
+#! \method{all}{integer64}(\dots, na.rm = FALSE)
+#! \method{any}{integer64}(\dots, na.rm = FALSE)
+#! \method{min}{integer64}(\dots, na.rm = FALSE)
+#! \method{max}{integer64}(\dots, na.rm = FALSE)
+#! \method{range}{integer64}(\dots, na.rm = FALSE)
+#! lim.integer64()
+#! \method{sum}{integer64}(\dots, na.rm = FALSE)
+#! \method{prod}{integer64}(\dots, na.rm = FALSE)
+#! }
+#! \arguments{
+#!   \item{\dots}{ atomic vectors of class 'integer64'}
+#!   \item{na.rm}{ logical scalar indicating whether to ignore NAs }
+#! }
+#! \details{
+#!   The numerical summary methods always return \code{integer64}. 
+#!   Therefore the methods for \code{min},\code{max} and \code{range} do not return \code{+Inf,-Inf}
+#!   on empty arguments, but \code{+9223372036854775807, -9223372036854775807} (in this sequence).
+#!   The same is true if only  \code{NA}s are submitted with argument \code{na.rm=TRUE}. 
+#!  \cr
+#!   \code{lim.integer64} returns these limits in proper order \code{-9223372036854775807, +9223372036854775807} and without a \code{\link{warning}}.
+#! }
+#! \value{
+#!   \code{\link{all}} and \code{\link{any}} return a logical scalar\cr
+#!   \code{\link{range}} returns a integer64 vector with two elements\cr
+#!   \code{\link{min}}, \code{\link{max}}, \code{\link{sum}} and \code{\link{prod}} return a integer64 scalar
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \seealso{ \code{\link{mean.integer64}} \code{\link{cumsum.integer64}} \code{\link{integer64}}  }
+#! \examples{
+#!   lim.integer64()
+#!   range(as.integer64(1:12))
+#! }
+
+
+#! \name{cumsum.integer64}
+#! \alias{cummin.integer64}
+#! \alias{cummax.integer64}
+#! \alias{cumsum.integer64}
+#! \alias{cumprod.integer64}
+#! \alias{diff.integer64}
+#! \title{
+#!    Cumulative Sums, Products, Extremes and lagged differences
+#! }
+#! \description{
+#!   Cumulative Sums, Products, Extremes and lagged differences
+#! }
+#! \usage{
+#! \method{cummin}{integer64}(x)
+#! \method{cummax}{integer64}(x)
+#! \method{cumsum}{integer64}(x)
+#! \method{cumprod}{integer64}(x)
+#! \method{diff}{integer64}(x, lag = 1L, differences = 1L, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{ an atomic vector of class 'integer64'}
+#!   \item{lag}{ see \code{\link{diff}} }
+#!   \item{differences}{ see \code{\link{diff}} }
+#!   \item{\dots}{ ignored }
+#! }
+#! \value{
+#!   \code{\link{cummin}}, \code{\link{cummax}} , \code{\link{cumsum}} and \code{\link{cumprod}} 
+#!      return a integer64 vector of the same length as their input\cr
+#!   \code{\link{diff}} returns a integer64 vector shorter by \code{lag*differences} elements \cr
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \seealso{ \code{\link{sum.integer64}} \code{\link{integer64}}  }
+#! \examples{
+#!   cumsum(rep(as.integer64(1), 12))
+#!   diff(as.integer64(c(0,1:12)))
+#!   cumsum(as.integer64(c(0, 1:12)))
+#!   diff(cumsum(as.integer64(c(0,0,1:12))), differences=2)
+#! }
+
+
+#! \name{c.integer64}
+#! \alias{c.integer64}
+#! \alias{cbind.integer64}
+#! \alias{rbind.integer64}
+#! \title{
+#!    Concatenating integer64 vectors
+#! }
+#! \description{
+#!   The usual functions 'c', 'cbind' and 'rbind'
+#! }
+#! \usage{
+#! \method{c}{integer64}(\dots, recursive = FALSE)
+#! \method{cbind}{integer64}(\dots)
+#! \method{rbind}{integer64}(\dots)
+#! }
+#! \arguments{
+#!   \item{\dots}{ two or more arguments coerced to 'integer64' and passed to \code{\link{NextMethod}} }
+#!   \item{recursive}{ logical. If \code{recursive = TRUE}, the function recursively descends through lists (and pairlists) combining all their elements into a vector. }
+#! }
+#! \value{
+#!   \code{\link{c}} returns a integer64 vector of the total length of the input \cr
+#!   \code{\link{cbind}} and \code{\link{rbind}} return a integer64 matrix
+#! }
+#! \note{
+#!   R currently only dispatches generic 'c' to method 'c.integer64' if the first argument is 'integer64'
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \seealso{ \code{\link{rep.integer64}} \code{\link{seq.integer64}} 
+#!           \code{\link{as.data.frame.integer64}} \code{\link{integer64}}  
+#! }
+#! \examples{
+#!   c(as.integer64(1), 2:6)
+#!   cbind(1:6, as.integer(1:6))
+#!   rbind(1:6, as.integer(1:6))
+#! }
+
+
+#! \name{rep.integer64}
+#! \alias{rep.integer64}
+#! \title{
+#!    Replicate elements of integer64 vectors
+#! }
+#! \description{
+#!   Replicate elements of integer64 vectors
+#! }
+#! \usage{
+#! \method{rep}{integer64}(x, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{ a vector of 'integer64' to be replicated }
+#!   \item{\dots}{ further arguments passed to \code{\link{NextMethod}} }
+#! }
+#! \value{
+#!   \code{\link{rep}} returns a integer64 vector
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \seealso{ \code{\link{c.integer64}} \code{\link{rep.integer64}} 
+#!           \code{\link{as.data.frame.integer64}} \code{\link{integer64}}  
+#! }
+#! \examples{
+#!   rep(as.integer64(1:2), 6)
+#!   rep(as.integer64(1:2), c(6,6))
+#!   rep(as.integer64(1:2), length.out=6)
+#! }
+
+
+#! \name{seq.integer64}
+#! \alias{seq.integer64}
+#! \title{
+#!    integer64: Sequence Generation
+#! }
+#! \description{
+#!   Generating sequence of integer64 values
+#! }
+#! \usage{
+#! \method{seq}{integer64}(from = NULL, to = NULL, by = NULL, length.out = NULL, along.with = NULL, \dots)
+#! }
+#! \arguments{
+#!   \item{from}{ integer64 scalar (in order to dispatch the integer64 method of \code{\link{seq}} }
+#!   \item{to}{ scalar }
+#!   \item{by}{ scalar }
+#!   \item{length.out}{ scalar }
+#!   \item{along.with}{ scalar }
+#!   \item{\dots}{ ignored }
+#! }
+#! \details{
+#!   \code{seq.integer64} does coerce its arguments 'from', 'to' and 'by' to \code{integer64}.
+#!   If not provided, the argument 'by' is automatically determined as \code{+1} or \code{-1},
+#!   but the size of 'by' is not calculated as in \code{\link{seq}} (because this might result in a non-integer value).
+#! }
+#! \value{
+#!   an integer64 vector with the generated sequence
+#! }
+#! \note{
+#!   In base R \code{\link{:}} currently is not generic and does not dispatch, see section "Limitations inherited from Base R" in \code{\link{integer64}}
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \seealso{ \code{\link{c.integer64}} \code{\link{rep.integer64}} 
+#!           \code{\link{as.data.frame.integer64}} \code{\link{integer64}}  
+#! }
+#! \examples{
+#!   # colon not activated: as.integer64(1):12
+#!   seq(as.integer64(1), 12, 2)
+#!   seq(as.integer64(1), by=2, length.out=6)
+#! }
+
+
+#! \name{as.data.frame.integer64}
+#! \alias{as.data.frame.integer64}
+#! \title{
+#!    integer64: Coercing to data.frame column
+#! }
+#! \description{
+#!   Coercing integer64 vector to data.frame.
+#! }
+#! \usage{
+#!   \method{as.data.frame}{integer64}(x, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{ an integer64 vector }
+#!   \item{\dots}{ passed to NextMethod \code{\link{as.data.frame}} after removing the 'integer64' class attribute }
+#! }
+#! \value{
+#!   a one-column data.frame containing an integer64 vector
+#! }
+#! \details{
+#!   'as.data.frame.integer64' is rather not intended to be called directly,
+#!   but it is required to allow integer64 as data.frame columns.
+#! }
+#! \note{
+#!   This is currently very slow -- any ideas for improvement?
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \seealso{ 
+#!   \code{\link{cbind.integer64}} \code{\link{integer64}}  %as.vector.integer64 removed as requested by the CRAN maintainer \code{\link{as.vector.integer64}} 
+#! }
+#! \examples{
+#!   as.data.frame.integer64(as.integer64(1:12))
+#!   data.frame(a=1:12, b=as.integer64(1:12))
+#! }
+
+
+
+#! \name{plusclass}
+#! \alias{plusclass}
+#! \alias{minusclass}
+#! \title{
+#!    integer64: Maintaining S3 class attribute
+#! }
+#! \description{
+#!   Maintaining integer64 S3 class attribute.
+#! }
+#! \usage{
+#!   plusclass(class, whichclass)
+#!   minusclass(class, whichclass)
+#! }
+#! \arguments{
+#!   \item{class}{ NULL or a character vector of class attributes }
+#!   \item{whichclass}{ the (single) class name to add or remove from the class vector  }
+#! }
+#! \value{
+#!   NULL or a character vector of class attributes
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ classes }
+#! \keyword{ manip }
+#! \keyword{ internal }
+#! \seealso{ 
+#!   \code{\link{oldClass}} \code{\link{integer64}}  
+#! }
+#! \examples{
+#!   plusclass("inheritingclass","integer64")
+#!   minusclass(c("inheritingclass","integer64"), "integer64")
+#! }
+
+
+# if (!exists(":.default")){
+	# ":.default" <- get(":")
+	# ":" <- function(from,to)UseMethod(":")
+# }
+
+# register the S3 class "integer64" with the S4 system so it can appear in
+# S4 method signatures and in the setAs() coercions defined below
+setOldClass("integer64")
+
+
+# identical() wrapper with defaults suitable for integer64:
+# num.eq=FALSE and single.NA=FALSE force a bitwise comparison of the
+# underlying doubles, which is the correct notion of equality for the
+# 64-bit integer payload stored in them
+identical.integer64 <- function(x, y
+, num.eq = FALSE
+, single.NA = FALSE
+, attrib.as.set = TRUE
+, ignore.bytecode = TRUE
+)
+identical(x=x, y=y
+, num.eq = num.eq
+, single.NA = single.NA
+, attrib.as.set = attrib.as.set
+, ignore.bytecode = ignore.bytecode
+)
+
+
+# S3 generic: coerce x to class 'integer64'
+as.integer64 <- function (x, ...) 
+UseMethod("as.integer64")
+
+# S3 generic: render the 64-bit payload of x as a string of bits
+as.bitstring <- function(x, ...)
+UseMethod("as.bitstring")
+
+
+
+# Remove 'whichclass' from a class attribute vector.
+# Returns the vector unchanged when 'whichclass' is absent, and NULL/empty
+# input unchanged; used to strip "integer64" before handing objects to base R.
+minusclass <- function(class, whichclass){
+  if (length(class)){
+    hit <- whichclass == class
+    if (any(hit)) class[!hit] else class
+  } else {
+    class
+  }
+}
+
+# Append 'whichclass' to a class attribute vector unless it is already
+# present; for NULL/empty input the result is just 'whichclass'.
+# Used to (re)attach "integer64" to results produced by base R.
+plusclass <- function(class, whichclass){
+  if (length(class)){
+    hit <- whichclass == class
+    if (any(hit)) class else c(class, whichclass)
+  } else {
+    whichclass
+  }
+}
+
+# Determine the attributes for the result of a binary operation on (e1, e2):
+# attributes are taken from the operand that carries a 'dim' attribute, or
+# from e1 when both (or neither) do; conformability of lengths/dims is
+# checked first, with warnings on partial recycling.
+# For internal use by the integer64 binary operators only.
+binattr <- function(e1,e2){
+  d1 <- dim(e1)
+  d2 <- dim(e2)
+  n1 <- length(e1)
+  n2 <- length(e2)
+  if (length(d1)){
+    if (length(d2)){
+      # both operands are arrays: dimensions must match exactly
+      if (!identical(d1, d2))
+        stop("non-conformable arrays")
+    }else{
+      # e1 is an array, e2 a plain vector: e2 must recycle into e1
+      if (n2>n1)
+        stop("length(e2) does not match dim(e1)")
+      if (n1%%n2)
+        warning("length(e1) not a multiple length(e2)")
+    }
+    attributes(e1)
+  }else{
+    if (length(d2)){
+      # e2 is an array, e1 a plain vector: e1 must recycle into e2
+      if (n1>n2)
+        stop("length(e1) does not match dim(e2)")  # fixed: message said dim(n2)
+      if (n2%%n1)
+        warning("length(e2) not a multiple length(e1)")
+      attributes(e2)
+    }else{
+      # two plain vectors: warn when the shorter does not recycle evenly
+      if (n1<n2){
+        if (n2%%n1)
+          warning("length(e2) not a multiple length(e1)")
+      }else{
+        if (n1%%n2)
+          warning("length(e1) not a multiple length(e2)")
+      }
+      attributes(e1)
+    }
+  }
+}
+
+
+# create an integer64 vector of the given length, initialized to zero;
+# storage is a plain double vector (8 bytes per element) carrying the
+# class attribute "integer64" -- the payload is reinterpreted in C
+integer64 <- function(length=0){
+  ret <- double(length)
+  oldClass(ret) <- "integer64"
+  ret
+}
+
+# TRUE if x inherits from class "integer64"
+is.integer64 <- function(x)inherits(x, "integer64")
+
+as.integer64.NULL <- function (x, ...){
+  ret <- double()
+  oldClass(ret) <- "integer64"
+  ret
+}
+
+as.integer64.integer64 <- function(x, ...)x
+
+# coerce a double vector to integer64; the conversion itself is done in C
+# (C_as_integer64_double writes into the pre-allocated 'ret').
+# names are dropped unless keep.names=TRUE
+as.integer64.double <- function(x, keep.names=FALSE, ...){
+  ret <- double(length(x))
+  .Call(C_as_integer64_double, x, ret)
+  if (keep.names)
+    names(ret) <- names(x)
+  oldClass(ret) <- "integer64"
+  ret
+}
+
+as.integer64.logical <- as.integer64.integer <- function(x, ...){
+  ret <- double(length(x))
+  .Call(C_as_integer64_integer, x, ret)
+  oldClass(ret) <- "integer64"
+  ret
+}
+
+# coerce a character vector to integer64; 'ret' is pre-filled with NA so
+# elements the C parser does not overwrite remain NA
+# (presumably unparseable strings -- confirm against C_as_integer64_character)
+as.integer64.character <- function(x, ...){
+  n <- length(x)
+  ret <- rep(as.double(NA), n)
+  .Call(C_as_integer64_character, x, ret)
+  oldClass(ret) <- "integer64"
+  ret
+}
+
+as.integer64.factor <- function(x, ...)
+as.integer64(unclass(x), ...)
+
+as.double.integer64 <- function(x, keep.names=FALSE, ...){
+  ret <- double(length(x))
+  .Call(C_as_double_integer64, x, ret)
+  if (keep.names)
+    names(ret) <- names(x)
+  ret
+}
+
+as.integer.integer64 <- function(x, ...){
+  ret <- integer(length(x))
+  .Call(C_as_integer_integer64, x, ret)
+  ret
+}
+
+as.logical.integer64 <- function(x, ...){
+  ret <- logical(length(x))
+  .Call(C_as_logical_integer64, x, ret)
+  ret
+}
+
+as.character.integer64 <- function(x, ...){
+  n <- length(x)
+  ret <- rep(as.character(NA), n)
+  .Call(C_as_character_integer64, x, ret)
+  ret
+}
+
+as.bitstring.integer64 <- function(x, ...){
+  n <- length(x)
+  ret <- rep(as.character(NA), n)
+  .Call(C_as_bitstring_integer64, x, ret)
+  ret
+}
+
+# read.table expects S4 as() 
+setAs("character","integer64",function(from)as.integer64.character(from))
+setAs("integer64","character",function(from)as.character.integer64(from))
+
+# this is a trick to generate NA_integer64_ for namespace export before 
+# as.integer64() is available because dll is not loaded
+NA_integer64_ <- unserialize(as.raw(c(0x58, 0x0a, 0x00, 0x00, 0x00, 0x02, 0x00, 0x03, 0x03, 
++ 0x00, 0x00, 0x02, 0x03, 0x00, 0x00, 0x00, 0x03, 0x0e, 0x00, 0x00, 
++ 0x00, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
++ 0x00, 0x04, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x00, 0x09, 
++ 0x00, 0x00, 0x00, 0x05, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x00, 0x00, 
++ 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x00, 0x09, 0x00, 
++ 0x00, 0x00, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x36, 
++ 0x34, 0x00, 0x00, 0x00, 0xfe)))
+
+# length<- for integer64: delegate to the default method, restore the class,
+# then explicitly zero any newly grown elements
+# NOTE(review): the padding supplied by NextMethod() is double NA, which is
+# not a valid integer64 payload -- hence the explicit 0 fill; confirm
+"length<-.integer64" <- function(x, value){
+  cl <- oldClass(x)
+  n <- length(x)
+  x <- NextMethod()
+  oldClass(x) <- cl
+  if (value>n)
+    x[(n+1):value] <- 0L
+  x
+}
+
+
+# format() for integer64: convert to decimal strings and delegate to the
+# character method; all attributes (dim, names, ...) are carried over to the
+# result, except that the "integer64" class is removed
+format.integer64 <- function(x, justify="right", ...){
+  a <- attributes(x)
+  x <- as.character(x)
+  ret <- format(x, justify=justify, ...)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+# print() for integer64: print a header line, then print the values as a
+# character vector (unquoted by default) with the original attributes minus
+# the "integer64" class; returns x invisibly as print methods should
+print.integer64 <- function(x, quote=FALSE, ...){
+  cat("integer64\n")
+  a <- attributes(x)
+  ret <- as.character(x)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  print(ret, quote=quote, ...)
+  invisible(x)
+}
+
+# str() for integer64: compact one-line summary of the first few elements.
+# note: the default for vec.len refers to strO, which is only assigned in the
+# body -- this works because R evaluates default arguments lazily, at the
+# first use of vec.len below
+str.integer64 <- function(object
+, vec.len  = strO$vec.len
+, give.head = TRUE
+, give.length = give.head
+, ...
+){
+  strO <- strOptions()
+  vec.len <- 2*vec.len
+  n <- length(object)
+  if (n>vec.len)
+    object <- object[seq_len(vec.len)]
+  cat(if (give.head)paste("integer64 ", if (give.length && n>1) paste("[1:",n,"] ",sep=""), sep=""), paste(as.character(object), collapse=" "),if(n>vec.len)" ...", " \n", sep="")
+  invisible()
+}
+
+# subscript method for integer64: delegate to the default method, restore the
+# class, and drop any cache attached to the result -- a hash/sort cache
+# describes the full vector, not the subset (remcache() from R/cache.R)
+"[.integer64" <- function(x,...){
+  cl <- oldClass(x)
+  ret <- NextMethod()
+  oldClass(ret) <- cl
+  remcache(ret)
+  ret
+}
+
+"[<-.integer64" <- function(x,...,value){
+  cl <- oldClass(x)
+  value <- as.integer64(value)
+  ret <- NextMethod()
+  oldClass(ret) <- cl
+  ret
+}
+
+"[[.integer64" <- function(x,...){
+  cl <- oldClass(x)
+  ret <- NextMethod()
+  oldClass(ret) <- cl
+  ret
+}
+
+"[[<-.integer64" <- function(x,...,value){
+  cl <- oldClass(x)
+  value <- as.integer64(value)
+  ret <- NextMethod()
+  oldClass(ret) <- cl
+  ret
+}
+
+# c() for integer64: coerce every argument to integer64, strip the class so
+# the default c() concatenates the underlying doubles unchanged, then restore
+# the class on the result; recursive=TRUE flattens lists by recursing into
+# this method. Note base R dispatches here only if the FIRST argument is
+# integer64 (see the \note in the Rd above).
+c.integer64 <-
+function (..., recursive = FALSE) 
+{
+	l <- list(...)
+	K <- length(l)
+	for (k in 1:K){
+		if (recursive && is.list(l[[k]])){
+			l[[k]] <- do.call("c.integer64", c(l[[k]], list(recursive = TRUE)))
+		}else{
+			if (!is.integer64(l[[k]])) {
+				# preserve names across the coercion (methods may drop them)
+				nam <- names(l[[k]])
+				l[[k]] <- as.integer64(l[[k]])
+				names(l[[k]]) <- nam
+			}
+			oldClass(l[[k]]) <- NULL
+		}
+	}
+	ret <- do.call("c", l)
+	oldClass(ret) <- "integer64"
+	ret
+}
+
+
+cbind.integer64 <- function(...){
+  l <- list(...)
+	K <- length(l)
+  for (k in 1:K){
+		if (!is.integer64(l[[k]])){
+			nam <- names(l[[k]])
+			l[[k]] <- as.integer64(l[[k]])
+			names(l[[k]]) <- nam
+		}
+		oldClass(l[[k]]) <- NULL
+  }
+  ret <- do.call("cbind", l)
+	oldClass(ret) <- "integer64"
+  ret
+}
+
+rbind.integer64 <- function(...){
+  l <- list(...)
+	K <- length(l)
+  for (k in 1:K){
+		if (!is.integer64(l[[k]])){
+			nam <- names(l[[k]])
+			l[[k]] <- as.integer64(l[[k]])
+			names(l[[k]]) <- nam
+		}
+		oldClass(l[[k]]) <- NULL
+  }
+  ret <- do.call("rbind", l)
+	oldClass(ret) <- "integer64"
+  ret
+}
+
+# tenfold runtime if using attr() here instead of setattr()
+# as.data.frame.integer64 <- function(x, ...){
+  # cl <- oldClass(x)
+  # oldClass(x) <- minusclass(cl, "integer64")
+  # ret <- as.data.frame(x, ...)
+  # k <- length(ret)
+  # for (i in 1:k)
+    # oldClass(ret[[i]]) <- cl
+  # ret
+# }
+as.data.frame.integer64 <- function(x, ...){
+  cl <- oldClass(x)
+  on.exit(setattr(x, "class", cl))
+  setattr(x, "class", minusclass(cl, "integer64"))
+  ret <- as.data.frame(x, ...)
+  k <- length(ret)
+  for (i in 1:k)
+   setattr(ret[[i]], "class", cl) 
+  ret
+}
+
+
+"rep.integer64" <- function(x, ...){
+	cl <- oldClass(x)
+	ret <- NextMethod()
+	oldClass(ret) <- cl
+	ret
+}
+
+# FIXME no method dispatch for :
+":.integer64" <- function(from, to){
+  from <- as.integer64(from)
+  to <- as.integer64(to)
+  ret <- double(as.integer(to-from+1L))
+  .Call(C_seq_integer64, from, as.integer64(1L), ret)
+  oldClass(ret) <- "integer64"
+  ret
+}
+
+# integer64 method for seq(): generate an arithmetic integer64 sequence.
+# 'from', 'to' and 'by' are coerced to integer64; when 'by' is missing it is
+# chosen as +1 or -1 from sign(to-from) -- unlike base seq() the step size is
+# never computed from the range, because that could yield a non-integer.
+# Exactly one error-message typo fixed ("informatoin" -> "information").
+"seq.integer64" <- function(from=NULL, to=NULL, by=NULL, length.out=NULL, along.with=NULL, ...){
+    if (is.null(length.out))
+      length.out <- length(along.with)
+    else 
+      length.out <- as.integer(length.out)
+	
+    if (is.null(by)){
+      if (is.null(from) || is.null(to))
+	    by <- as.integer64(1L)
+	  else
+	    by <- as.integer64(sign(to-from))
+    }else{
+	  by <- as.integer64(by)
+	  if ((!is.null(from)) && (!is.null(to)) && sign(by)!=sign(to-from))
+        stop("wrong sign of 'by' argument")
+    }
+  
+    if (is.null(from)){
+      if (length.out && length(to))
+	    from <- to - (length.out-1L)*by
+	  else
+	    from <- as.integer64(1)
+    }else 
+	  from <- as.integer64(from)
+	  
+	if (!length(to)){
+	  if (length.out)
+	    to <- from + (length.out-1L)*by
+      else
+		stop("not enough information provided")
+	}
+	
+    if (!length.out){
+	  length.out <- (to-from) %/% by + 1L
+    }
+
+    if (length.out){
+      if (length.out==1L)
+        return(from)
+      else{
+        # delegate the actual sequence generation to C for speed
+		ret <- double(as.integer(length.out))
+		.Call(C_seq_integer64, from, by, ret)
+		oldClass(ret) <- "integer64"
+		return(ret)
+	  }
+    }else
+      return(integer64())
+}
+
+
+# plus operator for integer64; unary "+x" returns x unchanged.
+# binattr() checks conformability and supplies the result attributes;
+# the addition itself is performed in C (C_plus_integer64)
+"+.integer64" <- function(e1, e2){
+  if (missing(e2))
+    return(e1)
+  a <- binattr(e1,e2)
+  e1 <- as.integer64(e1)
+  e2 <- as.integer64(e2)
+  ret <- double(max(length(e1),length(e2)))
+  .Call(C_plus_integer64, e1, e2, ret)
+  a$class <- plusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"-.integer64" <- function(e1, e2){
+  if (missing(e2)){
+    e2 <- e1
+	e1 <- 0L
+  }
+  a <- binattr(e1,e2)
+  e1 <- as.integer64(e1)
+  e2 <- as.integer64(e2)
+  ret <- double(max(length(e1),length(e2)))
+  .Call(C_minus_integer64, e1, e2, ret)
+  a$class <- plusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"%/%.integer64" <- function(e1, e2){
+  a <- binattr(e1,e2)
+  e1 <- as.integer64(e1)
+  e2 <- as.integer64(e2)
+  ret <- double(max(length(e1), length(e2)))
+  .Call(C_intdiv_integer64, e1, e2, ret)
+  a$class <- plusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"%%.integer64" <- function(e1, e2){
+  a <- binattr(e1,e2)
+  e1 <- as.integer64(e1)
+  e2 <- as.integer64(e2)
+  ret <- double(max(length(e1), length(e2)))
+  .Call(C_mod_integer64, e1, e2, ret)
+  a$class <- plusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+
+"*.integer64" <- function(e1, e2){
+  a <- binattr(e1,e2)
+  ret <- double(max(length(e1),length(e2)))
+  if (is.double(e2))  # implies !is.integer64(e2)
+    .Call(C_times_integer64_double, as.integer64(e1), e2, ret)
+  else
+    .Call(C_times_integer64_integer64, as.integer64(e1), as.integer64(e2), ret)
+  a$class <- plusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"^.integer64" <- function(e1, e2){
+  a <- binattr(e1,e2)
+  ret <- double(max(length(e1),length(e2)))
+  if (is.double(e2))  # implies !is.integer64(e2)
+    .Call(C_power_integer64_double, as.integer64(e1), e2, ret)
+  else
+    .Call(C_power_integer64_integer64, as.integer64(e1), as.integer64(e2), ret)
+  a$class <- plusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"/.integer64" <- function(e1, e2){
+  a <- binattr(e1,e2)
+  ret <- double(max(length(e1),length(e2)))
+  if (is.double(e2))  # implies !is.integer64(e2)
+    .Call(C_divide_integer64_double, as.integer64(e1), e2, ret)
+  else
+	  .Call(C_divide_integer64_integer64, as.integer64(e1), as.integer64(e2), ret)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+
+"sign.integer64" <- function(x){
+  a <- attributes(x)
+  ret <- double(length(x))
+  .Call(C_sign_integer64, x,ret)
+  attributes(ret) <- a
+  ret
+}
+
+"abs.integer64" <- function(x){
+  a <- attributes(x)
+  ret <- double(length(x))
+  .Call(C_abs_integer64, x,ret)
+  attributes(ret) <- a
+  ret
+}
+
+"sqrt.integer64" <- function(x){
+  a <- attributes(x)
+  ret <- double(length(x))
+  .Call(C_sqrt_integer64, x,ret)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"log.integer64" <- function(x, base=NULL){
+  a <- attributes(x)
+  ret <- double(max(length(x),length(base)))
+  if (is.null(base))
+	.Call(C_log_integer64, x, ret)
+  else if(length(base)==1){
+    .Call(C_logbase_integer64, x, as.double(base), ret)
+  }else{
+    .Call(C_logvect_integer64, x, as.double(base), ret)
+  }
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"log10.integer64" <- function(x){
+  a <- attributes(x)
+  ret <- double(length(x))
+  .Call(C_log10_integer64, x,ret)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"log2.integer64" <- function(x){
+  a <- attributes(x)
+  ret <- double(length(x))
+  .Call(C_log2_integer64, x,ret)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"trunc.integer64" <- function(x, ...)x
+"floor.integer64" <- "ceiling.integer64" <- function(x)x
+
+"signif.integer64" <- function(x, digits=6)x
+
+"scale.integer64" <- function(x, center = TRUE, scale = TRUE)scale(as.double(x, keep.names=TRUE), center=center, scale=scale)
+
+# round() for integer64: with digits >= 0 an integer is already rounded,
+# so x is returned unchanged; with negative digits, snap to a power of ten
+# via integer division.
+# NOTE(review): (x %/% base) * base floors to the next lower multiple rather
+# than rounding to the nearest multiple -- confirm this is intended
+"round.integer64" <- function(x, digits=0){
+  if (digits<0){
+    a <- attributes(x)
+	base <- 10^floor(-digits)
+	ret <- (x%/%base) * base
+    #a$class <- minusclass(a$class, "integer64")
+    attributes(ret) <- a
+	ret
+  }else
+	x
+}
+
+"any.integer64" <- function(..., na.rm = FALSE){
+  l <- list(...)
+  ret <- logical(1)
+  if (length(l)==1){
+		  .Call(C_any_integer64, l[[1]], na.rm, ret)
+		  ret
+  }else{
+	  any(sapply(l, function(e){
+		if (is.integer64(e)){
+		  .Call(C_any_integer64, e, na.rm, ret)
+		  ret
+		}else{
+		  any(e, na.rm = na.rm)
+		}
+	  }), na.rm = na.rm)
+  }
+}
+
+"all.integer64" <- function(..., na.rm = FALSE){
+  l <- list(...)
+  ret <- logical(1)
+  if (length(l)==1){
+		  .Call(C_all_integer64, l[[1]], na.rm, ret)
+		  ret
+  }else{
+	  all(sapply(l, function(e){
+		if (is.integer64(e)){
+		  .Call(C_all_integer64, e, na.rm, ret)
+		  ret
+		}else{
+		  all(e, na.rm = na.rm)
+		}
+	  }), na.rm = na.rm)
+  }
+}
+
+"sum.integer64" <- function(..., na.rm = FALSE){
+  l <- list(...)
+  ret <- double(1)
+  if (length(l)==1){
+		  .Call(C_sum_integer64, l[[1]], na.rm, ret)
+		  oldClass(ret) <- "integer64"
+		  ret
+  }else{
+	  ret <- sapply(l, function(e){
+		if (is.integer64(e)){
+		  .Call(C_sum_integer64, e, na.rm, ret)
+		  ret
+		}else{
+		  as.integer64(sum(e, na.rm = na.rm))
+		}
+	  })
+    oldClass(ret) <- "integer64"
+	  sum(ret, na.rm = na.rm)
+  }
+}
+
+
+
+"prod.integer64" <- function(..., na.rm = FALSE){
+  l <- list(...)
+  ret <- double(1)
+  if (length(l)==1){
+		  .Call(C_prod_integer64, l[[1]], na.rm, ret)
+		  oldClass(ret) <- "integer64"
+		  ret
+  }else{
+      ret <- sapply(l, function(e){
+		if (is.integer64(e)){
+		  .Call(C_prod_integer64, e, na.rm, ret)
+		  ret
+		}else{
+		  as.integer64(prod(e, na.rm = na.rm))
+		}
+	  })
+	  oldClass(ret) <- "integer64"
+	  prod(ret, na.rm = na.rm)
+  }
+}
+
+# minimum of integer64 arguments; mixed argument types are handled by
+# coercing non-integer64 pieces via as.integer64(min(...)).
+# 'noval' is TRUE only when every argument has length 0; in that case the
+# C code leaves the integer64 maximum (+9223372036854775807) as the result
+# instead of +Inf, and the warning below is issued (see Rd \details above)
+"min.integer64" <- function(..., na.rm = FALSE){
+  l <- list(...)
+  ret <- double(1)
+  noval <- TRUE
+  if (length(l)==1){
+	if (length(l[[1]]))
+	  noval <- FALSE
+    .Call(C_min_integer64, l[[1]], na.rm, ret)
+    oldClass(ret) <- "integer64"
+  }else{
+	  ret <- sapply(l, function(e){
+	    if (length(e))
+	      noval <<- FALSE
+		if (is.integer64(e)){
+		  .Call(C_min_integer64, e, na.rm, ret)
+		  ret
+		}else{
+		  as.integer64(min(e, na.rm = na.rm))
+		}
+	  })
+	  oldClass(ret) <- "integer64"
+	  ret <- min(ret, na.rm = na.rm)
+  }
+  if (noval)
+	warning("no non-NA value, returning +9223372036854775807")
+  ret
+}
+
+"max.integer64" <- function(..., na.rm = FALSE){
+  l <- list(...)
+  ret <- double(1)
+  noval <- TRUE
+  if (length(l)==1){
+	if (length(l[[1]]))
+	  noval <- FALSE
+	.Call(C_max_integer64, l[[1]], na.rm, ret)
+	oldClass(ret) <- "integer64"
+  }else{
+	ret <- sapply(l, function(e){
+	    if (length(e))
+	      noval <<- FALSE
+		if (is.integer64(e)){
+		  .Call(C_max_integer64, e, na.rm, ret)
+		  ret
+		}else{
+		  as.integer64(max(e, na.rm = na.rm))
+		}
+	})
+	oldClass(ret) <- "integer64"
+	ret <- max(ret, na.rm = na.rm)
+  }
+  if (noval)
+	warning("no non-NA value, returning -9223372036854775807")
+  ret
+}
+
+
+"range.integer64" <- function(..., na.rm = FALSE){
+  ret <- double(2)
+  l <- list(...)
+  noval <- TRUE
+  if (length(l)==1){
+	if (length(l[[1]]))
+	  noval <- FALSE
+	.Call(C_range_integer64, l[[1]], na.rm, ret)
+	oldClass(ret) <- "integer64"
+  }else{
+      ret <- unlist(sapply(l, function(e){
+	    if (length(e))
+	      noval <<- FALSE
+		if (is.integer64(e)){
+		  .Call(C_range_integer64, e, na.rm, ret)
+		  ret
+		}else{
+		  as.integer64(range(e, na.rm = na.rm))
+		}
+	  }))
+	  oldClass(ret) <- "integer64"
+	  ret <- range(ret, na.rm = na.rm)
+  }
+  if (noval)
+	warning("no non-NA value, returning c(+9223372036854775807, -9223372036854775807)")
+  ret
+}
+
+lim.integer64 <- function(){
+    ret <- double(2)
+	.Call(C_lim_integer64, ret)
+	oldClass(ret) <- "integer64"
+	return(ret)
+}
+
+# lagged differences for integer64; each of the 'differences' passes shortens
+# the result by 'lag' elements. The first pass reads from x into a fresh
+# buffer; later passes difference that buffer in place (C_diff_integer64).
+# Fix: clamp the final length to 0 -- previously, when the input was shorter
+# than lag*differences, n could go negative and length(ret) <- n errored
+# instead of returning an empty integer64 vector as base diff() does.
+"diff.integer64" <- function(x, lag=1L, differences=1L, ...){
+  lag <- as.integer(lag)
+  n <- length(x)
+  d <- differences <- as.integer(differences)
+  while(d>0L){
+	n <- n - lag
+    if (n<=0L){
+	  ret <- double()
+	  break
+	}
+	if (d==differences){
+	  ret <- double(n)
+      .Call(C_diff_integer64, x, as.integer64(lag), as.integer64(n), ret)
+	}else{
+	  .Call(C_diff_integer64, ret, as.integer64(lag), as.integer64(n), ret)
+	}
+	d <- d - 1L
+  }
+  # shrink to the final length; later passes used only the first n elements
+  length(ret) <- max(n, 0L)
+  oldClass(ret) <- "integer64"
+  ret
+}
+
+
+"cummin.integer64" <- function(x){
+  ret <- double(length(x))
+  .Call(C_cummin_integer64, x,ret)
+  oldClass(ret) <- "integer64"
+  ret
+}
+"cummax.integer64" <- function(x){
+
+  ret <- double(length(x))
+  .Call(C_cummax_integer64, x,ret)
+  oldClass(ret) <- "integer64"
+  ret
+}
+
+"cumsum.integer64" <- function(x){
+  ret <- double(length(x))
+  .Call(C_cumsum_integer64, x,ret)
+  oldClass(ret) <- "integer64"
+  ret
+}
+
+"cumprod.integer64" <- function(x){
+  ret <- double(length(x))
+  .Call(C_cumprod_integer64, x,ret)
+  oldClass(ret) <- "integer64"
+  ret
+}
+
+
+
+"is.na.integer64" <- function(x){
+  a <- attributes(x)
+  ret <- logical(length(x))
+  .Call(C_isna_integer64, x, ret)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"is.finite.integer64" <- function(x)!is.na(x)
+"is.infinite.integer64" <- function(x)rep(FALSE, length(x))
+"is.nan.integer64" <- function(x)rep(FALSE, length(x))
+
+
+"==.integer64" <- function(e1, e2){
+  a <- binattr(e1,e2)
+  e1 <- as.integer64(e1)
+  e2 <- as.integer64(e2)
+  ret <- logical(max(length(e1), length(e2)))
+  .Call(C_EQ_integer64, e1, e2, ret)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"!=.integer64" <- function(e1, e2){
+  a <- binattr(e1,e2)
+  e1 <- as.integer64(e1)
+  e2 <- as.integer64(e2)
+  ret <- logical(max(length(e1), length(e2)))
+  .Call(C_NE_integer64, e1, e2, ret)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"<.integer64" <- function(e1, e2){
+  a <- binattr(e1,e2)
+  e1 <- as.integer64(e1)
+  e2 <- as.integer64(e2)
+  ret <- logical(max(length(e1), length(e2)))
+  .Call(C_LT_integer64, e1, e2, ret)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"<=.integer64" <- function(e1, e2){
+  a <- binattr(e1,e2)
+  e1 <- as.integer64(e1)
+  e2 <- as.integer64(e2)
+  ret <- logical(max(length(e1), length(e2)))
+  .Call(C_LE_integer64, e1, e2, ret)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+">.integer64" <- function(e1, e2){
+  a <- binattr(e1,e2)
+  e1 <- as.integer64(e1)
+  e2 <- as.integer64(e2)
+  ret <- logical(max(length(e1), length(e2)))
+  .Call(C_GT_integer64, e1, e2, ret)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+">=.integer64" <- function(e1, e2){
+  a <- binattr(e1,e2)
+  e1 <- as.integer64(e1)
+  e2 <- as.integer64(e2)
+  ret <- logical(max(length(e1), length(e2)))
+  .Call(C_GE_integer64, e1, e2, ret)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"&.integer64" <- function(e1, e2){
+  a <- binattr(e1,e2)
+  ret <- as.logical(e1) & as.logical(e2)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+"|.integer64" <- function(e1, e2){
+  a <- binattr(e1,e2)
+  ret <- as.logical(e1) | as.logical(e2)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+xor.integer64 <- function(x, y){
+  a <- binattr(x,y)
+  ret <- as.logical(x) != as.logical(y)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+
+"!.integer64" <- function(x){
+  a <- attributes(x)
+  ret <- !as.logical(x)
+  a$class <- minusclass(a$class, "integer64")
+  attributes(ret) <- a
+  ret
+}
+
+# as.vector.integer64 removed as requested by the CRAN maintainer
+# as.vector.integer64 <- function(x, mode="any"){
+  # ret <- NextMethod()
+  # if (mode=="any")
+	# oldClass(ret) <- "integer64"
+  # ret
+# }
+
+# bug in R does not dispatch
+# is.vector() analogue for integer64 (base R does not dispatch is.vector,
+# as noted above): TRUE only if, apart from the "integer64" class and
+# possible names, the object carries no further classes or attributes --
+# mirroring is.vector() semantics for plain vectors
+is.vector.integer64 <- function(x, mode="any"){
+  cl <- minusclass(oldClass(x), "integer64")
+  a <- attributes(x)
+  a$class <- NULL
+  a$names <- NULL
+  if (is.na(match(mode, c("any","integer64"))) || length(cl) || length(a) )
+    FALSE
+  else
+    TRUE
+}
+
diff --git a/R/patch64.R b/R/patch64.R
new file mode 100644
index 0000000..e661f68
--- /dev/null
+++ b/R/patch64.R
@@ -0,0 +1,166 @@
+# /*
+# R-Code for patching S3 generics
+# S3 atomic 64bit integers for R
+# (c) 2011 Jens Oehlschägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+# */
+
+#! \name{bit64S3}
+#! \alias{bit64S3}
+#! \alias{:}
+#! \alias{:.default}
+#! \alias{:.integer64}
+#! \alias{is.double}
+#! \alias{is.double.default}
+#! \alias{is.double.integer64}
+#! \alias{match}
+#! \alias{match.default}
+#! \alias{\%in\%}
+#! \alias{\%in\%.default}
+#! \alias{rank}
+#! \alias{rank.default}
+#! %\alias{table}
+#! %\alias{table.default}
+#! \alias{order}
+#! \alias{order.default}
+#! \title{
+#!   Turning base R functions into S3 generics for bit64 
+#! }
+#! \description{
+#! 	Turn those base functions S3 generic which are used in bit64
+#! }
+#! \usage{
+#!	from:to
+#!  #--as-cran complains about \method{:}{default}(from, to)
+#!  #--as-cran complains about \method{:}{integer64}(from, to)
+#!	is.double(x)
+#!  \method{is.double}{default}(x)
+#!  \method{is.double}{integer64}(x)
+#! 	match(x, table, ...)
+#!  \method{match}{default}(x, table, ...)
+#! 	x \%in\% table
+#!  \method{\%in\%}{default}(x, table)
+#! 	rank(x, ...)
+#!  \method{rank}{default}(x, ...)
+#! 	%table(...)
+#!  %\method{table}{default}(...)
+#! 	order(...)
+#!  \method{order}{default}(...)
+#! }
+#! \arguments{
+#!   \item{x}{
+#! 	integer64 vector: the values to be matched, optionally carrying a cache created with \code{\link{hashcache}}
+#! }
+#!   \item{table}{
+#! 	integer64 vector: the values to be matched against, optionally carrying a cache created with \code{\link{hashcache}} or \code{\link{sortordercache}}
+#! }
+#!   \item{from}{ scalar denoting first element of sequence }
+#!   \item{to}{ scalar denoting last element of sequence }
+#!   \item{\dots}{ ignored }
+#! }
+#! \details{
+#!    The following functions are turned into S3 generics in order to dispatch methods for \code{\link{integer64}}: 
+#!    \preformatted{
+#! 	   \code{\link{:}}
+#! 	   \code{\link{is.double}}
+#! 	   \code{\link{match}}
+#! 	   \code{\link{\%in\%}}
+#! 	   %\code{\link{table}}
+#! 	   \code{\link{rank}}
+#! 	   \code{\link{order}}
+#!    }
+#! }
+#! \value{
+#! 	\code{\link{invisible}}
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \note{
+#! 	\code{\link{is.double}} returns \code{FALSE} for \code{\link{integer64}} \cr
+#! 	\code{\link{:}} currently only dispatches at its first argument, thus \code{as.integer64(1):9} works but \code{1:as.integer64(9)} doesn't
+#! 	\code{\link{match}} currently only dispatches at its first argument and expects its second argument also to be integer64, otherwise throws an error. Beware of something like \code{match(2, as.integer64(0:3))}
+#! 	\code{\link{\%in\%}} currently only dispatches at its first argument and expects its second argument also to be integer64, otherwise throws an error. Beware of something like \code{2 \%in\% as.integer64(0:3)}
+#! 	\code{\link{order}} currently only orders a single argument, trying more than one raises an error
+#! }
+#! \seealso{
+#! 	\code{\link{bit64}}, \code{\link{S3}}
+#! }
+#! \examples{
+#!  is.double(as.integer64(1))
+#! 	as.integer64(1):9
+#!  match(as.integer64(2), as.integer64(0:3))
+#!  as.integer64(2) \%in\% as.integer64(0:3)
+#!  
+#!  unique(as.integer64(c(1,1,2)))
+#!  rank(as.integer64(c(1,1,2)))
+#!  
+#!  %table(as.integer64(c(1,1,2)))
+#!  %table(as.integer64(c(1,1,2)),as.integer64(c(3,4,4)))
+#!  %table(as.integer64(c(1,1,2)),c(3,4,4))
+#!  %table(c(1,1,2),as.integer64(c(3,4,4)))
+#!  
+#!  order(as.integer64(c(1,NA,2)))
+#!  
+#!  \dontshow{
+#!  stopifnot(identical(match(as.integer64(2), as.integer64(0:3)), match(2, 0:3)))
+#!  stopifnot(identical(as.integer64(2) \%in\% as.integer64(0:3), 2 \%in\% 0:3))
+#!  
+#!  stopifnot(identical(unique(as.integer64(c(1,1,2))), as.integer64(unique(c(1,1,2)))))
+#!  stopifnot(identical(rank(as.integer64(c(1,1,2))), rank(c(1,1,2))))
+#!  
+#!  %stopifnot(identical(table(as.integer64(c(1,1,2))), table(c(1,1,2))))
+#!  %stopifnot(identical(table(as.integer64(c(1,1,2)),as.integer64(c(3,4,4))), table(c(1,1,2),c(3,4,4))))
+#!  %stopifnot(identical(table(as.integer64(c(1,1,2)),c(3,4,4)), table(c(1,1,2),c(3,4,4))))
+#!  %stopifnot(identical(table(c(1,1,2),as.integer64(c(3,4,4))), table(c(1,1,2),c(3,4,4))))
+#!  
+#!  stopifnot(identical(order(as.integer64(c(1,NA,2))), order(c(1,NA,2))))
+#!  stopifnot(identical(order(as.integer64(c(1,NA,2)), decreasing=TRUE), order(c(1,NA,2), decreasing=TRUE)))
+#!  }
+#! }
+#! \keyword{ methods }
+
+# OCT 2013: bit64S3() at wish of CRAN maintainers replaced by direct conversion to S3 generics
+# in order to avoid assigning to globalenv
+
+# Turn ':' into an S3 generic exactly once (skip if another package already
+# patched it), so that integer64 sequences can dispatch below.
+if (!exists(":.default")){
+	":" <- function(from,to) UseMethod(":")
+	":.default" <- function(from,to) base::":"(from,to)
+}
+# integer64 method: delegate to seq.integer64 (dispatches on 'from' only)
+":.integer64" <- function(from, to)seq.integer64(from=from, to=to)
+
+# Turn is.double into an S3 generic once, so integer64 can report FALSE
+# (integer64 is stored in doubles but must not be treated as double).
+if (!exists("is.double.default")){
+	"is.double" <- function(x) UseMethod("is.double")
+	"is.double.default" <- function(x) base::"is.double"(x)
+}
+"is.double.integer64" <- function(x)FALSE
+
+# Turn match into an S3 generic once; the integer64 method is defined
+# elsewhere and dispatches on the first argument only.
+if (!exists("match.default")){
+	"match" <- function(x, table, ...) UseMethod("match")
+	"match.default" <- function(x, table, ...) base::"match"(x, table, ...)
+}
+
+# Turn %in% into an S3 generic once; dispatches on the first argument only.
+if (!exists("%in%.default")){
+	"%in%" <- function(x, table) UseMethod("%in%")
+	"%in%.default" <- function(x, table) base::"%in%"(x, table)
+}
+
+# Turn rank into an S3 generic once, so rank.integer64 can dispatch.
+if (!exists("rank.default")){
+	"rank" <- function(x, ...) UseMethod("rank")
+	"rank.default" <- function(x, ...) base::"rank"(x, ...)
+}
+
+# not yet able to combine with other column types - better leave table() as is and hope for as.factor.integer64
+#if (!exists("table.default")){
+#	"table" <- function(...) UseMethod("table")
+#	"table.default" <- function(...) base::"table"(...)
+#}
+
+# Turn order into an S3 generic once; order.integer64 (see R/sort64.R)
+# currently handles only a single vector argument.
+if (!exists("order.default")){
+	"order" <- function(...) UseMethod("order")
+	"order.default" <- function(...) base::"order"(...)
+}
+
diff --git a/R/sort64.R b/R/sort64.R
new file mode 100644
index 0000000..96dc603
--- /dev/null
+++ b/R/sort64.R
@@ -0,0 +1,690 @@
+# /*
+# R-Code for sorting and ordering
+# S3 atomic 64bit integers for R
+# (c) 2011 Jens Oehlschägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+# */
+
+#! \name{ramsort.integer64}
+#! \alias{ramsort.integer64}
+#! \alias{shellsort.integer64}
+#! \alias{quicksort.integer64}
+#! \alias{mergesort.integer64}
+#! \alias{radixsort.integer64}
+#! \alias{ramorder.integer64}
+#! \alias{shellorder.integer64}
+#! \alias{quickorder.integer64}
+#! \alias{mergeorder.integer64}
+#! \alias{radixorder.integer64}
+#! \alias{ramsortorder.integer64}
+#! \alias{shellsortorder.integer64}
+#! \alias{quicksortorder.integer64}
+#! \alias{mergesortorder.integer64}
+#! \alias{radixsortorder.integer64}
+#! \title{
+#!    Low-level integer64 methods for in-RAM sorting and ordering
+#! }
+#! \description{
+#!   Fast low-level methods for sorting and ordering. 
+#!   The \code{..sortorder} methods do sorting and ordering at once, which requires more RAM than ordering but is (almost) as fast as sorting.
+#! }
+#! \note{
+#!  Note that these methods purposely violate the functional programming paradigm: they are called for the side-effect of changing some of their arguments.
+#!  The \code{sort}-methods change \code{x}, the \code{order}-methods change \code{i}, and the \code{sortorder}-methods change both \code{x} and \code{i}.
+#! }
+#! \usage{
+#! \method{shellsort}{integer64}(x, has.na=TRUE, na.last=FALSE, decreasing=FALSE, \dots)
+#! \method{shellsortorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, \dots)
+#! \method{shellorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, \dots)
+#! \method{mergesort}{integer64}(x, has.na=TRUE, na.last=FALSE, decreasing=FALSE, \dots)
+#! \method{mergeorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, \dots)
+#! \method{mergesortorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, \dots)
+#! \method{quicksort}{integer64}(x, has.na=TRUE, na.last=FALSE, decreasing=FALSE
+#! , restlevel=floor(1.5*log2(length(x))), \dots)
+#! \method{quicksortorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE
+#! , restlevel=floor(1.5*log2(length(x))), \dots)
+#! \method{quickorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE
+#! , restlevel=floor(1.5*log2(length(x))), \dots)
+#! \method{radixsort}{integer64}(x, has.na=TRUE, na.last=FALSE, decreasing=FALSE, radixbits=8L, \dots)
+#! \method{radixsortorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, radixbits=8L, \dots)
+#! \method{radixorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, radixbits=8L, \dots)
+#! \method{ramsort}{integer64}(x, has.na = TRUE, na.last=FALSE, decreasing = FALSE, stable = TRUE
+#! , optimize = c("time", "memory"), VERBOSE = FALSE, \dots)
+#! \method{ramsortorder}{integer64}(x, i, has.na = TRUE, na.last=FALSE, decreasing = FALSE, stable = TRUE
+#! , optimize = c("time", "memory"), VERBOSE = FALSE, \dots)
+#! \method{ramorder}{integer64}(x, i, has.na = TRUE, na.last=FALSE, decreasing = FALSE, stable = TRUE
+#! , optimize = c("time", "memory"), VERBOSE = FALSE, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{ a vector to be sorted by \code{\link{ramsort}} and \code{\link{ramsortorder}}, i.e. the output of  \code{\link{sort}} }
+#!   \item{i}{ integer positions to be modified by \code{\link{ramorder}} and \code{\link{ramsortorder}}, default is 1:n, in this case the output is similar to \code{\link{order}} }
+#!   \item{has.na}{
+#! boolean scalar defining whether the input vector might contain \code{NA}s. If we know we don't have NAs, this may speed-up.
+#! \emph{Note} that you risk a crash if there are unexpected \code{NA}s with \code{has.na=FALSE}
+#! }
+#!   \item{na.last}{
+#! boolean scalar telling ramsort whether to sort \code{NA}s last or first.
+#! \emph{Note} that 'boolean' means that there is no third option \code{NA} as in \code{\link{sort}}
+#! }
+#!   \item{decreasing}{
+#! boolean scalar telling ramsort whether to sort increasing or decreasing
+#! }
+#!   \item{stable}{
+#! boolean scalar defining whether stable sorting is needed. Allowing non-stable may speed-up.
+#! }
+#!   \item{optimize}{
+#! by default ramsort optimizes for 'time' which requires more RAM,
+#! set to 'memory' to minimize RAM requirements and sacrifice speed
+#! }
+#!   \item{restlevel}{
+#! number of remaining recursionlevels before \code{quicksort} switches from recursing to \code{shellsort}
+#! }
+#!   \item{radixbits}{
+#! 	size of radix in bits
+#! }
+#!   \item{VERBOSE}{
+#!   cat some info about chosen method
+#! }
+#!   \item{\dots}{ further arguments, passed from generics, ignored in methods }
+#! }
+#! \details{
+#!  see \code{\link[bit]{ramsort}}
+#! }
+#! \value{
+#!   These functions return the number of \code{NAs} found or assumed during sorting
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ programming }
+#! \keyword{ manip }
+#! \seealso{ \code{\link{ramsort}} for the generic, \code{\link[ff]{ramsort.default}} for the methods provided by package \code{\link[ff]{ff}}, \code{\link{sort.integer64}} for the sort interface and \code{\link{sortcache}} for caching the work of sorting}
+#! \examples{
+#!   x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#!   x
+#!   message("ramsort example")
+#!   s <- clone(x)
+#!   ramsort(s)
+#!   message("s has been changed in-place - whether or not ramsort uses an in-place algorithm")
+#!   s
+#!   message("ramorder example")
+#!   s <- clone(x)
+#!   o <- seq_along(s)
+#!   ramorder(s, o)
+#!   message("o has been changed in-place - s remains unchanged")
+#!   s
+#!   o
+#!   s[o]
+#!   message("ramsortorder example")
+#!   o <- seq_along(s)
+#!   ramsortorder(s, o)
+#!   message("s and o have both been changed in-place - this is much faster")
+#!   s
+#!   o
+#! }
+
+# Shellsort family: the C routines modify x (and permute i) *in place*;
+# each wrapper returns the number of NAs found or assumed during sorting.
+shellsort.integer64 <- function(x, has.na=TRUE, na.last=FALSE, decreasing=FALSE, ...)
+{
+  force(x)  # evaluate the promise before the C code mutates x by reference
+  .Call(C_r_ram_integer64_shellsort
+  , x = x
+  , has_na     = as.logical(has.na)
+  , na_last    = as.logical(na.last)
+  , decreasing = as.logical(decreasing)
+  , PACKAGE = "bit64"
+  )
+}
+# sort x and simultaneously permute the pre-initialized order vector i
+shellsortorder.integer64 <- function(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, ...)
+{
+  force(x)
+  force(i)
+  if (!is.integer(i)) 
+    stop("i must be integer")
+  if (length(i) != length(x)) 
+    stop("lengths of x and i don't match")  
+  .Call(C_r_ram_integer64_shellsortorder
+  , x = x
+  , i = i
+  , has_na     = as.logical(has.na)
+  , na_last    = as.logical(na.last)
+  , decreasing = as.logical(decreasing)
+  , PACKAGE = "bit64"
+  )
+}
+# permute i only (x remains unchanged)
+shellorder.integer64 <- function(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, ...)
+{
+  force(x)
+  force(i)
+  if (!is.integer(i)) 
+    stop("i must be integer")
+  if (length(i) != length(x)) 
+    stop("lengths of x and i don't match")  
+  .Call(C_r_ram_integer64_shellorder
+  , x = x
+  , i = i
+  , has_na     = as.logical(has.na)
+  , na_last    = as.logical(na.last)
+  , decreasing = as.logical(decreasing)
+  , PACKAGE = "bit64"
+  )
+}
+
+# Mergesort family (stable): the C routines modify x (and permute i) in
+# place; each wrapper returns the number of NAs found or assumed.
+mergesort.integer64 <- function(x, has.na=TRUE, na.last=FALSE, decreasing=FALSE, ...){
+  force(x)  # evaluate the promise before the C code mutates x by reference
+  .Call(C_r_ram_integer64_mergesort
+  , x = x
+  , has_na     = as.logical(has.na)
+  , na_last    = as.logical(na.last)
+  , decreasing = as.logical(decreasing)
+  , PACKAGE = "bit64"
+  )
+}
+
+# permute i only (x remains unchanged)
+mergeorder.integer64 <- function(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, ...){
+  force(x)
+  force(i)
+  if (!is.integer(i)) 
+    stop("i must be integer")
+  if (length(i) != length(x)) 
+    stop("lengths of x and i don't match")  
+  .Call(C_r_ram_integer64_mergeorder
+  , x = x
+  , i = i
+  , has_na     = as.logical(has.na)
+  , na_last    = as.logical(na.last)
+  , decreasing = as.logical(decreasing)
+  , PACKAGE = "bit64"
+  )
+}
+
+# sort x and simultaneously permute the pre-initialized order vector i
+mergesortorder.integer64 <- function(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, ...){
+  force(x)
+  force(i)
+  if (!is.integer(i)) 
+    stop("i must be integer")
+  if (length(i) != length(x)) 
+    stop("lengths of x and i don't match")  
+  .Call(C_r_ram_integer64_mergesortorder
+  , x = x
+  , i = i
+  , has_na     = as.logical(has.na)
+  , na_last    = as.logical(na.last)
+  , decreasing = as.logical(decreasing)
+  , PACKAGE = "bit64"
+  )
+}
+
+
+# Quicksort family (not stable): in-place C sorting with a recursion-depth
+# budget; below 'restlevel' remaining levels the C code falls back to
+# shellsort. Each wrapper returns the number of NAs found or assumed.
+quicksort.integer64 <- function(x, has.na=TRUE, na.last=FALSE, decreasing=FALSE
+, restlevel=floor(1.5*log2(length(x)))
+, ...){
+  force(x)  # evaluate the promise before the C code mutates x by reference
+  # clamp: a negative budget (e.g. from length 0/1 inputs) means "no recursion"
+  if (restlevel<0)
+    restlevel = 0L
+  .Call(C_r_ram_integer64_quicksort
+  , x = x
+  , has_na     = as.logical(has.na)
+  , na_last    = as.logical(na.last)
+  , decreasing = as.logical(decreasing)
+  , restlevel = as.integer(restlevel)
+  , PACKAGE = "bit64"
+  )
+}
+
+# sort x and simultaneously permute the pre-initialized order vector i
+quicksortorder.integer64 <- function(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE
+, restlevel=floor(1.5*log2(length(x)))
+, ...){
+  force(x)
+  force(i)
+  if (!is.integer(i)) 
+    stop("i must be integer")
+  if (length(i) != length(x)) 
+    stop("lengths of x and i don't match")  
+  if (restlevel<0)
+    restlevel = 0L
+  .Call(C_r_ram_integer64_quicksortorder
+  , x = x
+  , i = i
+  , has_na     = as.logical(has.na)
+  , na_last    = as.logical(na.last)
+  , decreasing = as.logical(decreasing)
+  , restlevel = as.integer(restlevel)
+  , PACKAGE = "bit64"
+  )
+}
+
+# permute i only (x remains unchanged)
+quickorder.integer64 <- function(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE
+, restlevel=floor(1.5*log2(length(x)))
+, ...){
+  force(x)
+  force(i)
+  if (!is.integer(i)) 
+    stop("i must be integer")
+  if (length(i) != length(x)) 
+    stop("lengths of x and i don't match")  
+  if (restlevel<0)
+    restlevel = 0L
+  .Call(C_r_ram_integer64_quickorder
+  , x = x
+  , i = i
+  , has_na     = as.logical(has.na)
+  , na_last    = as.logical(na.last)
+  , decreasing = as.logical(decreasing)
+  , restlevel = as.integer(restlevel)
+  , PACKAGE = "bit64"
+  )
+}
+
+# Radixsort family (stable): in-place C sorting using 'radixbits' bits per
+# pass; only power-of-two radix sizes up to 16 bits are supported.
+# Each wrapper returns the number of NAs found or assumed.
+radixsort.integer64 <- function(x, has.na=TRUE, na.last=FALSE, decreasing=FALSE
+, radixbits=8L
+, ...)
+{
+  stopifnot(radixbits %in% c(1L, 2L, 4L, 8L, 16L))
+  force(x)  # evaluate the promise before the C code mutates x by reference
+  .Call(C_r_ram_integer64_radixsort
+  , x = x
+  , has_na     = as.logical(has.na)
+  , na_last    = as.logical(na.last)
+  , decreasing = as.logical(decreasing)
+  , radixbits = as.integer(radixbits)
+  , PACKAGE = "bit64"
+  )
+}
+
+# sort x and simultaneously permute the pre-initialized order vector i
+radixsortorder.integer64 <- function(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE
+, radixbits=8L
+, ...)
+{
+  stopifnot(radixbits %in% c(1L, 2L, 4L, 8L, 16L))
+  force(x)
+  force(i)
+  if (!is.integer(i)) 
+    stop("i must be integer")
+  if (length(i) != length(x)) 
+    stop("lengths of x and i don't match")  
+  .Call(C_r_ram_integer64_radixsortorder
+  , x = x
+  , i = i
+  , has_na     = as.logical(has.na)
+  , na_last    = as.logical(na.last)
+  , decreasing = as.logical(decreasing)
+  , radixbits = as.integer(radixbits)
+  , PACKAGE = "bit64"
+  )
+}
+
+# permute i only (x remains unchanged)
+radixorder.integer64 <- function(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE
+, radixbits=8L
+, ...)
+{
+  stopifnot(radixbits %in% c(1L, 2L, 4L, 8L, 16L))
+  force(x)
+  force(i)
+  if (!is.integer(i)) 
+    stop("i must be integer")
+  if (length(i) != length(x)) 
+    stop("lengths of x and i don't match")  
+  .Call(C_r_ram_integer64_radixorder
+  , x = x
+  , i = i
+  , has_na     = as.logical(has.na)
+  , na_last    = as.logical(na.last)
+  , decreasing = as.logical(decreasing)
+  , radixbits = as.integer(radixbits)
+  , PACKAGE = "bit64"
+  )
+}
+
+# In-place sort dispatcher: picks mergesort / radixsort / quicksort by
+# length and the 'optimize'/'stable' preferences. If x has names, a
+# sortorder variant is used so the names can be permuted along with x.
+# Returns the number of NAs found or assumed.
+ramsort.integer64 <- function (x
+, has.na = TRUE
+, na.last=FALSE
+, decreasing = FALSE
+, stable = TRUE
+, optimize = c("time", "memory")
+, VERBOSE = FALSE
+, ...
+)
+{
+	optimize <- match.arg(optimize)
+	if (is.null(names(x))){
+		if (optimize == "time"){
+			if (length(x)<2048){
+				if (VERBOSE) 
+					cat("ramsort selected mergesort\n")
+				mergesort(x, has.na = has.na, na.last = na.last, decreasing = decreasing)
+			}else if (length(x)<16777216){
+				if (VERBOSE) 
+					cat("ramsort selected radix8sort\n")
+				radixsort(x, radixbits=8L, has.na = has.na, na.last = na.last, decreasing = decreasing)
+			}else{
+				if (VERBOSE) 
+					cat("ramsort selected radix4sort\n")
+				radixsort(x, radixbits=4L, has.na = has.na, na.last = na.last, decreasing = decreasing)
+			}
+		}else{
+			if (VERBOSE) 
+				cat("ramsort selected quicksort\n")
+			quicksort(x, has.na = has.na, na.last = na.last, decreasing = decreasing)
+		}
+	}else{
+		if (stable || optimize == "time"){
+			i <- seq_along(x)
+			# NOTE(review): radix8->radix4 switch here is at 2097152, but the
+			# no-names branch above switches at 16777216 -- presumably because
+			# sortorder needs more RAM; confirm the asymmetry is intended
+		    if (length(x)<2048){
+				if (VERBOSE) 
+					cat("ramsortorder selected mergesortorder\n")
+				ret <- mergesortorder(x, i, has.na = has.na, na.last = na.last, decreasing = decreasing)
+			}else if (length(x)<2097152){
+				if (VERBOSE) 
+					cat("ramsortorder selected radix8sortorder\n")
+				ret <- radixsortorder(x, i, radixbits=8L, has.na = has.na, na.last = na.last, decreasing = decreasing)
+			}else{
+				if (VERBOSE) 
+					cat("ramsortorder selected radix4sortorder\n")
+				ret <- radixsortorder(x, i, radixbits=4L, has.na = has.na, na.last = na.last, decreasing = decreasing)
+			}
+		}else{
+			if (VERBOSE) 
+				cat("ramsort selected quicksortorder\n")
+			i <- seq_along(x)
+			ret <- quicksortorder(x, i, has.na = has.na, na.last = na.last, decreasing = decreasing)
+		}
+    # permute the names to match the new element order (in place via setattr)
+    setattr(x, "names", names(x)[i])
+    ret
+	}
+}
+
+# Sort x and simultaneously permute i, both in place, choosing a concrete
+# algorithm by length and the 'optimize'/'stable' preferences.
+#
+# x          integer64 vector, sorted in place by the C code
+# i          integer order vector (typically seq_along(x)), permuted in place
+# has.na     set FALSE only if x is guaranteed NA-free (speed-up; crash risk
+#            if NAs occur anyway)
+# na.last    TRUE sorts NAs last, FALSE first (no NA option as in base sort)
+# decreasing sort direction
+# stable     request a stable algorithm (merge/radix instead of quicksort)
+# optimize   "time" (more RAM, faster) or "memory"
+# VERBOSE    cat the chosen algorithm
+#
+# Returns the number of NAs found or assumed; names are not supported.
+ramsortorder.integer64 <- function (x
+, i
+, has.na = TRUE
+, na.last=FALSE
+, decreasing = FALSE
+, stable = TRUE
+, optimize = c("time", "memory")
+, VERBOSE = FALSE
+, ...
+)
+{
+	optimize <- match.arg(optimize)
+	# scalar condition: use short-circuit && rather than vectorized &
+	if (is.null(names(x)) && is.null(names(i))){
+		if (stable || optimize == "time") {
+		    if (length(x)<2048){
+				if (VERBOSE) 
+					cat("ramsortorder selected mergesortorder\n")
+				mergesortorder(x, i, has.na = has.na, na.last = na.last, decreasing = decreasing)
+			}else if (length(x)<16777216){
+				if (VERBOSE) 
+					cat("ramsortorder selected radix8sortorder\n")
+				radixsortorder(x, i, radixbits=8L, has.na = has.na, na.last = na.last, decreasing = decreasing)
+			}else{
+				if (VERBOSE) 
+					cat("ramsortorder selected radix4sortorder\n")
+				radixsortorder(x, i, radixbits=4L, has.na = has.na, na.last = na.last, decreasing = decreasing)
+			}
+		}else{
+			if (VERBOSE) 
+				cat("ramsortorder selected quicksortorder\n")
+			quicksortorder(x, i, has.na = has.na, na.last = na.last, decreasing = decreasing)
+		}
+	}else
+	  stop("names not supported")
+}
+
+# Permute i in place so that x[i] is sorted; x itself is left unchanged.
+#
+# x          integer64 vector to order (not modified)
+# i          integer order vector (typically seq_along(x)), permuted in place
+# has.na     set FALSE only if x is guaranteed NA-free (speed-up; crash risk
+#            if NAs occur anyway)
+# na.last    TRUE sorts NAs last, FALSE first
+# decreasing sort direction
+# stable     TRUE selects mergeorder (stable), FALSE quickorder (faster)
+# optimize   accepted for interface symmetry; only 'stable' decides here
+# VERBOSE    cat the chosen algorithm
+#
+# Returns the number of NAs found or assumed; names are not supported.
+ramorder.integer64 <- function (x
+, i
+, has.na = TRUE
+, na.last=FALSE
+, decreasing = FALSE
+, stable = TRUE
+, optimize = c("time", "memory")
+, VERBOSE = FALSE
+, ...
+)
+{
+	optimize <- match.arg(optimize)
+	# scalar condition: use short-circuit && rather than vectorized &
+	if (is.null(names(x)) && is.null(names(i))){
+		if (stable) {
+			if (VERBOSE) 
+				cat("ramorder selected mergeorder\n")
+			mergeorder(x, i, has.na = has.na, na.last = na.last, decreasing = decreasing)
+		}else{
+			if (VERBOSE) 
+				cat("ramorder selected quickorder\n")
+			quickorder(x, i, has.na = has.na, na.last = na.last, decreasing = decreasing)
+		}
+	}else
+	  stop("names not supported")
+}
+
+
+#! \name{sort.integer64}
+#! \alias{sort.integer64}
+#! \alias{order.integer64}
+#! \title{
+#!    High-level integer64 methods for sorting and ordering
+#! }
+#! \description{
+#!   Fast high-level methods for sorting and ordering. 
+#!   These are wrappers to \code{\link{ramsort}} and friends and do not modify their arguments.
+#! }
+#! \usage{
+#! \method{sort}{integer64}(x, decreasing = FALSE, has.na = TRUE, na.last = TRUE, stable = TRUE
+#! , optimize = c("time", "memory"), VERBOSE = FALSE, \dots)
+#! \method{order}{integer64}(\dots, na.last = TRUE, decreasing = FALSE, has.na = TRUE, stable = TRUE
+#! , optimize = c("time", "memory"), VERBOSE = FALSE)
+#! }
+#! \arguments{
+#!   \item{x}{ a vector to be sorted by \code{\link{ramsort}} and \code{\link{ramsortorder}}, i.e. the output of  \code{\link{sort}} }
+#!   \item{has.na}{
+#! boolean scalar defining whether the input vector might contain \code{NA}s. If we know we don't have NAs, this may speed-up.
+#! \emph{Note} that you risk a crash if there are unexpected \code{NA}s with \code{has.na=FALSE}
+#! }
+#!   \item{na.last}{
+#! boolean scalar telling ramsort whether to sort \code{NA}s last or first.
+#! \emph{Note} that 'boolean' means that there is no third option \code{NA} as in \code{\link{sort}}
+#! }
+#!   \item{decreasing}{
+#! boolean scalar telling ramsort whether to sort increasing or decreasing
+#! }
+#!   \item{stable}{
+#! boolean scalar defining whether stable sorting is needed. Allowing non-stable may speed-up.
+#! }
+#!   \item{optimize}{
+#! by default ramsort optimizes for 'time' which requires more RAM,
+#! set to 'memory' to minimize RAM requirements and sacrifice speed
+#! }
+#!   \item{VERBOSE}{
+#!   cat some info about chosen method
+#! }
+#!   \item{\dots}{ further arguments, passed from generics, ignored in methods }
+#! }
+#! \details{
+#!  see \code{\link{sort}} and \code{\link{order}}
+#! }
+#! \value{
+#!   \code{sort} returns the sorted vector and \code{order} returns the order positions. 
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ programming }
+#! \keyword{ manip }
+#! \seealso{ \code{\link[=sort.integer64]{sort}}, \code{\link{sortcache}} }
+#! \examples{
+#!   x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+#!   x
+#!   sort(x)
+#!   message("the following has default optimize='time' which is faster but requires more RAM
+#! , this calls 'ramorder'")
+#!   order.integer64(x)
+#!   message("slower with less RAM, this calls 'ramsortorder'")
+#!   order.integer64(x, optimize="memory")
+#! }
+
+# Developer-only timing / consistency snippets; never executed (if (FALSE)).
+if (FALSE){
+	library(bit64)
+	x <- as.integer64(c(sample(1e7),NA))
+	#system.time(sortcache(x))[3]
+	# system.time(ordercache(x))[3]
+	system.time(sortordercache(x))[3]
+
+	# system.time(s <- sort(x, na.last=FALSE, decreasing=FALSE))[3]
+	# stopifnot(identical(s, {xs<-clone(x);ramsort(xs, na.last=FALSE, decreasing=FALSE);xs}))
+	# system.time(s <- sort(x, na.last=TRUE, decreasing=FALSE))[3]
+	# stopifnot(identical(s, {xs<-clone(x);ramsort(xs, na.last=TRUE, decreasing=FALSE);xs}))
+	# system.time(s <- sort(x, na.last=FALSE, decreasing=TRUE))[3]
+	# stopifnot(identical(s, {xs<-clone(x);ramsort(xs, na.last=FALSE, decreasing=TRUE);xs}))
+	# system.time(s <- sort(x, na.last=TRUE, decreasing=TRUE))[3]
+	# stopifnot(identical(s, {xs<-clone(x);ramsort(xs, na.last=TRUE, decreasing=TRUE);xs}))
+	
+	system.time(o <- order.integer64(x, na.last=FALSE, decreasing=FALSE))[3]
+	stopifnot(identical(o, {xo<-seq_along(x);ramorder(x, xo, na.last=FALSE, decreasing=FALSE);xo}))
+	system.time(o <- order.integer64(x, na.last=TRUE, decreasing=FALSE))[3]
+	stopifnot(identical(o, {xo<-seq_along(x);ramorder(x, xo, na.last=TRUE, decreasing=FALSE);xo}))
+	system.time(o <- order.integer64(x, na.last=FALSE, decreasing=TRUE))[3]
+	stopifnot(identical(o, {xo<-seq_along(x);ramorder(x, xo, na.last=FALSE, decreasing=TRUE);xo}))
+	system.time(o <- order.integer64(x, na.last=TRUE, decreasing=TRUE))[3]
+	stopifnot(identical(o, {xo<-seq_along(x);ramorder(x, xo, na.last=TRUE, decreasing=TRUE);xo}))
+	
+}
+
+# High-level sort: reuses a sort or order cache on x when available (see
+# sortcache/sortordercache), otherwise clones x and sorts the clone, so the
+# input is never modified. na.last follows base sort semantics: NA drops
+# NAs, TRUE puts them last, FALSE first.
+sort.integer64 <- function(x
+, decreasing = FALSE
+, has.na = TRUE
+, na.last = TRUE
+, stable = TRUE
+, optimize = c("time", "memory")
+, VERBOSE = FALSE
+, ...
+){
+  # caches are built with na.last=FALSE/increasing; any other request needs
+  # the C rearrangement below. NA means "drop NAs", handled at the end.
+  do.na.last <- is.na(na.last) || na.last
+  # NOTE(review): 'c' shadows base::c inside this function -- harmless here
+  # since c() is not called below, but fragile under maintenance
+  c <- cache(x)
+  if (!is.null(c$sort)){
+		if (do.na.last || decreasing){
+			s <- double(length(x))
+			.Call(C_r_ram_integer64_sortsrt
+			, x = c$sort
+			, na_count   = as.integer(na.count <- c$na.count)
+			, na_last    = as.logical(do.na.last)
+			, decreasing = as.logical(decreasing)
+			, s		 	 = s
+			, PACKAGE = "bit64"
+			)
+			setattr(s, "class", "integer64")
+		}else 
+			s <- c$sort  # here we save copying at all
+  }else if (!is.null(c$order)){
+		if (do.na.last || decreasing){
+			s <- double(length(x))
+			.Call(C_r_ram_integer64_sortsrt
+			, x = x[c$order]
+			, na_count   = as.integer(na.count <- c$na.count)
+			, na_last    = as.logical(do.na.last)
+			, decreasing = as.logical(decreasing)
+			, s		 	 = s
+			, PACKAGE = "bit64"
+			)
+			setattr(s, "class", "integer64")
+		}else 
+			s <- x[c$order]
+  }else{
+    # no cache: clone and sort in place; a cached na.count of 0 lets us
+    # skip NA handling in the C code
+    if (identical(c$na.count, 0L))
+	  has.na <- FALSE
+		s <- clone(x)
+		na.count <- ramsort(
+			s
+		, has.na=has.na
+		, na.last=do.na.last
+		, decreasing=decreasing
+		, stable=stable
+		, optimize = optimize
+		, VERBOSE = FALSE
+		)
+  }
+  # na.last=NA: NAs were sorted last (do.na.last TRUE), so truncate them off
+  if (is.na(na.last) && na.count)
+		length(s) <- length(s) - na.count
+  s
+}
+
+
+# High-level order: like base order() but currently restricted to a single
+# integer64 vector. Reuses an order (and optionally sort) cache when
+# available; otherwise computes the order without modifying the input.
+# na.last follows base semantics: NA drops NAs, TRUE last, FALSE first.
+order.integer64 <- function(
+  ...
+, na.last = TRUE
+, decreasing = FALSE
+, has.na = TRUE
+, stable = TRUE
+, optimize = c("time", "memory")
+, VERBOSE = FALSE
+){
+  do.na.last <- is.na(na.last) || na.last
+	# COPY ON MODIFY is broken for reading from list(...)
+	# because list(...) creates a copy of all ... and this invalidates our caches
+	# therefore we go this sick workaround
+	# (evaluate each ... argument lazily in the caller's frame by symbol)
+	argsymbols <- as.list(substitute(list(...)))[-1L]
+	argframe <- parent.frame()
+	A <- function(i)eval(argsymbols[[i]], argframe)
+	N <- length(argsymbols)
+  if (N!=1L)
+	stop("can only order one vector at the moment")
+  x <- A(1)
+  # NOTE(review): 'c' shadows base::c inside this function -- harmless here
+  c <- cache(x)
+  if (!is.null(c$order)){
+		if (do.na.last || decreasing){
+			o <- integer(length(x))
+			# with only an order cache the C code must look values up via x;
+			# with a sort cache as well it can read the sorted values directly
+			if (is.null(c$sort)){
+				.Call(C_r_ram_integer64_orderord
+				, x = x
+				, i = c$order
+				, na_count   = as.integer(na.count <- c$na.count)
+				, na_last    = as.logical(do.na.last)
+				, decreasing = as.logical(decreasing)
+				, o		 	 = o
+				, PACKAGE = "bit64"
+				)
+			}else{
+				.Call(C_r_ram_integer64_sortorderord
+				, x = c$sort
+				, i = c$order
+				, na_count   = as.integer(na.count <- c$na.count)
+				, na_last    = as.logical(do.na.last)
+				, decreasing = as.logical(decreasing)
+				, o		 	 = o
+				, PACKAGE = "bit64"
+				)
+			}
+  		}else 
+			o <- c$order  # here we save copying at all
+  }else{
+	  # no cache: "time" sorts a clone together with the order (faster),
+	  # "memory" orders against x directly (no clone)
+	  if (identical(c$na.count, 0L))
+		has.na <- FALSE
+	  optimize <- match.arg(optimize)
+	  o <- seq_along(x)
+	  if (optimize=="time"){
+		  s <- clone(x)
+		  na.count <- ramsortorder(s, o
+		  , has.na=has.na
+		  , na.last=do.na.last
+		  , decreasing=decreasing
+		  , stable=stable
+		  , optimize = optimize
+		  , VERBOSE = FALSE
+		  )
+	  }else{
+		  na.count <- ramorder(x, o
+		  , has.na=has.na
+		  , na.last=do.na.last
+		  , decreasing=decreasing
+		  , stable=stable
+		  , optimize = optimize
+		  , VERBOSE = FALSE
+		  )
+	  }
+	}
+	# na.last=NA: NA positions were ordered last, so truncate them off
+	if (is.na(na.last) && na.count)
+	  length(o) <- length(o) - na.count
+	o
+}
+
+
diff --git a/R/sortuse64.R b/R/sortuse64.R
new file mode 100644
index 0000000..c291b6a
--- /dev/null
+++ b/R/sortuse64.R
@@ -0,0 +1,582 @@
+# /*
+# R-Code for searching and merging
+# S3 atomic 64bit integers for R
+# (c) 2011 Jens Oehlschägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+# */
+
+#! \name{sortnut}
+#! \alias{sortnut}
+#! \alias{sortnut.integer64}
+#! \alias{ordernut}
+#! \alias{ordernut.integer64}
+#! \alias{sortfin}
+#! \alias{sortfin.integer64}
+#! \alias{orderpos}
+#! \alias{orderpos.integer64}
+#! \alias{orderfin}
+#! \alias{orderfin.integer64}
+#! \alias{sortorderpos}
+#! \alias{sortorderpos.integer64}
+#! \alias{orderdup}
+#! \alias{orderdup.integer64}
+#! \alias{sortorderdup}
+#! \alias{sortorderdup.integer64}
+#! \alias{sortuni}
+#! \alias{sortuni.integer64}
+#! \alias{orderuni}
+#! \alias{orderuni.integer64}
+#! \alias{sortorderuni}
+#! \alias{sortorderuni.integer64}
+#! \alias{orderupo}
+#! \alias{orderupo.integer64}
+#! \alias{sortorderupo}
+#! \alias{sortorderupo.integer64}
+#! \alias{ordertie}
+#! \alias{ordertie.integer64}
+#! \alias{sortordertie}
+#! \alias{sortordertie.integer64}
+#! \alias{sorttab}
+#! \alias{sorttab.integer64}
+#! \alias{ordertab}
+#! \alias{ordertab.integer64}
+#! \alias{sortordertab}
+#! \alias{sortordertab.integer64}
+#! \alias{orderkey}
+#! \alias{orderkey.integer64}
+#! \alias{sortorderkey}
+#! \alias{sortorderkey.integer64}
+#! \alias{orderrnk}
+#! \alias{orderrnk.integer64}
+#! \alias{sortorderrnk}
+#! \alias{sortorderrnk.integer64}
+#! \alias{sortqtl}
+#! \alias{sortqtl.integer64}
+#! \alias{orderqtl}
+#! \alias{orderqtl.integer64}
+#! \title{
+#!    Searching and other uses of sorting for 64bit integers
+#! }
+#! \description{
+#!   This is roughly an implementation of hash functionality but based on sorting instead of a hashmap.
+#!   Since sorting is more informative than hashing, we can do some more interesting things.
+#! }
+#! \usage{
+#! sortnut(sorted, \dots)
+#! ordernut(table, order, \dots)
+#! sortfin(sorted, x, \dots)
+#! orderfin(table, order, x, \dots)
+#! orderpos(table, order, x, \dots)
+#! sortorderpos(sorted, order, x, \dots)
+#! orderdup(table, order, \dots)
+#! sortorderdup(sorted, order, \dots)
+#! sortuni(sorted, nunique, \dots)
+#! orderuni(table, order, nunique, \dots)
+#! sortorderuni(table, sorted, order, nunique, \dots)
+#! orderupo(table, order, nunique, \dots)
+#! sortorderupo(sorted, order, nunique, keep.order = FALSE, \dots)
+#! ordertie(table, order, nties, \dots)
+#! sortordertie(sorted, order, nties, \dots)
+#! sorttab(sorted, nunique, \dots)
+#! ordertab(table, order, nunique, \dots)
+#! sortordertab(sorted, order, \dots)
+#! orderkey(table, order, na.skip.num = 0L, \dots)
+#! sortorderkey(sorted, order, na.skip.num = 0L, \dots)
+#! orderrnk(table, order, na.count, \dots)
+#! sortorderrnk(sorted, order, na.count, \dots)
+#! \method{sortnut}{integer64}(sorted, \dots)
+#! \method{ordernut}{integer64}(table, order, \dots)
+#! \method{sortfin}{integer64}(sorted, x, method=NULL, \dots)
+#! \method{orderfin}{integer64}(table, order, x, method=NULL, \dots)
+#! \method{orderpos}{integer64}(table, order, x, nomatch=NA, method=NULL, \dots)
+#! \method{sortorderpos}{integer64}(sorted, order, x, nomatch=NA, method=NULL, \dots)
+#! \method{orderdup}{integer64}(table, order, method=NULL, \dots)
+#! \method{sortorderdup}{integer64}(sorted, order, method=NULL, \dots)
+#! \method{sortuni}{integer64}(sorted, nunique, \dots)
+#! \method{orderuni}{integer64}(table, order, nunique, keep.order=FALSE, \dots)
+#! \method{sortorderuni}{integer64}(table, sorted, order, nunique, \dots)
+#! \method{orderupo}{integer64}(table, order, nunique, keep.order=FALSE, \dots)
+#! \method{sortorderupo}{integer64}(sorted, order, nunique, keep.order = FALSE, \dots)
+#! \method{ordertie}{integer64}(table, order, nties, \dots)
+#! \method{sortordertie}{integer64}(sorted, order, nties, \dots)
+#! \method{sorttab}{integer64}(sorted, nunique, \dots)
+#! \method{ordertab}{integer64}(table, order, nunique, denormalize=FALSE, keep.order=FALSE, \dots)
+#! \method{sortordertab}{integer64}(sorted, order, denormalize=FALSE, \dots)
+#! \method{orderkey}{integer64}(table, order, na.skip.num = 0L, \dots)
+#! \method{sortorderkey}{integer64}(sorted, order, na.skip.num = 0L, \dots)
+#! \method{orderrnk}{integer64}(table, order, na.count, \dots)
+#! \method{sortorderrnk}{integer64}(sorted, order, na.count, \dots)
+#! \method{sortqtl}{integer64}(sorted, na.count, probs, \dots)
+#! \method{orderqtl}{integer64}(table, order, na.count, probs, \dots)
+#! }
+#! \arguments{
+#!   \item{x}{ an \code{\link{integer64}} vector }
+#!   \item{sorted}{ a sorted \code{\link{integer64}} vector }
+#!   \item{table}{ the original data with original order under the sorted vector }
+#!   \item{order}{ an \code{\link{integer}} order vector that turns 'table' into 'sorted' }
+#!   \item{nunique}{ number of unique elements, usually we get this from cache or call \code{sortnut} or \code{ordernut} }
+#!   \item{nties}{ number of tied values, usually we get this from cache or call \code{sortnut} or \code{ordernut} }
+#!   \item{denormalize}{ FALSE returns counts of unique values, TRUE returns each value with its counts }
+#!   \item{nomatch}{ the value to be returned if an element is not found in the hashmap }
+#!   \item{keep.order}{ determines order of results and speed: \code{FALSE} (the default) is faster and returns in sorted order, \code{TRUE} returns in the order of first appearance in the original data, but this requires extra work } 
+#!   \item{probs}{ vector of probabilities in [0..1] for which we seek quantiles }
+#!   \item{na.skip.num}{ 0 or the number of \code{NA}s. With 0, \code{NA}s are coded with 1L, with the number of \code{NA}s, these are coded with \code{NA}, the latter needed for \code{\link{as.factor.integer64}} }
+#!   \item{na.count}{ the number of \code{NA}s, needed for this low-level function algorithm }
+#!   \item{method}{ see details }
+#!   \item{\dots}{ further arguments, passed from generics, ignored in methods }
+#! }
+#! \details{
+#! \tabular{rrrrl}{
+#!    \bold{sortfun} \tab \bold{orderfun} \tab \bold{sortorderfun} \tab \bold{see also}          \tab \bold{description} \cr
+#!    \code{sortnut} \tab \code{ordernut} \tab                     \tab  \tab return number of tied and of unique values \cr
+#!    \code{sortfin} \tab \code{orderfin} \tab                     \tab \code{\link{\%in\%.integer64}} \tab return logical whether \code{x} is in \code{table} \cr
+#!                   \tab \code{orderpos} \tab \code{sortorderpos} \tab \code{\link[=match.integer64]{match}} \tab return positions of \code{x} in \code{table} \cr
+#!                   \tab \code{orderdup} \tab \code{sortorderdup} \tab \code{\link[=duplicated.integer64]{duplicated}} \tab return logical whether values are duplicated \cr
+#!    \code{sortuni} \tab \code{orderuni} \tab \code{sortorderuni} \tab \code{\link[=unique.integer64]{unique}} \tab return unique values (=dimensiontable) \cr
+#!                   \tab \code{orderupo} \tab \code{sortorderupo} \tab \code{\link[=unique.integer64]{unique}} \tab return positions of unique values \cr
+#!                   \tab \code{ordertie} \tab \code{sortordertie} \tab  \tab return positions of tied values \cr
+#!                   \tab \code{orderkey} \tab \code{sortorderkey} \tab  \tab positions of values in vector of unique values (match in dimensiontable) \cr
+#!    \code{sorttab} \tab \code{ordertab} \tab \code{sortordertab} \tab \code{\link[=table.integer64]{table}} \tab tabulate frequency of values  \cr
+#!                   \tab \code{orderrnk} \tab \code{sortorderrnk} \tab  \tab rank averaging ties \cr
+#!    \code{sortqtl} \tab \code{orderqtl} \tab                     \tab  \tab return quantiles given probabilities \cr
+#! }
+#! The functions \code{sortfin}, \code{orderfin}, \code{orderpos} and \code{sortorderpos} each offer three algorithms for finding \code{x} in \code{table}.  \cr
+#! With \code{method=1L} each value of \code{x} is searched independently using \emph{binary search}, this is fastest for small \code{table}s. \cr
+#! With \code{method=2L} the values of \code{x} are first sorted and then searched using \emph{doubly exponential search}, this is the best allround method. \cr
+#! With \code{method=3L} the values of \code{x} are first sorted and then searched using simple merging, this is the fastest method if \code{table} is huge and \code{x} has similar size and distribution of values. \cr
+#! With \code{method=NULL} the functions use a heuristic to determine the fastest algorithm. \cr
+#!
+#! The functions \code{orderdup} and \code{sortorderdup} each offer two algorithms for setting the truth values in the return vector.  \cr
+#! With \code{method=1L} the return values are set directly which causes random write access on a possibly large return vector. \cr
+#! With \code{method=2L} the return values are first set in a smaller bit-vector -- random access limited to a smaller memory region -- and finally written sequentially to the logical output  vector. \cr
+#! With \code{method=NULL} the functions use a heuristic to determine the fastest algorithm. \cr
+#! }
+#! \value{
+#!   see details
+#! }
+#! \author{
+#! Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+#! }
+#! \keyword{ programming }
+#! \keyword{ manip }
+#! \seealso{ \code{\link[=match.integer64]{match}} }
+#! \examples{
+#!  message("check the code of 'optimizer64' for examples:")
+#!  print(optimizer64)
+#! }
+
+
+
+sortnut <- function(sorted, ...) UseMethod("sortnut")
+sortnut.integer64 <- function(sorted, ...)
+{
+  # Count unique and tied values in an already sorted integer64 vector.
+  counts <- .Call(C_r_ram_integer64_sortnut, x = sorted, PACKAGE = "bit64")
+  names(counts) <- c("nunique", "nties")
+  counts
+}
+
+ordernut <- function(table, order, ...) UseMethod("ordernut")
+ordernut.integer64 <- function(table, order, ...)
+{
+  # Count unique and tied values of 'table' as seen through 'order'.
+  counts <- .Call(C_r_ram_integer64_ordernut
+  , table = as.integer64(table)
+  , order = as.integer(order)
+  , PACKAGE = "bit64"
+  )
+  names(counts) <- c("nunique", "nties")
+  counts
+}
+
+# generic: logical vector telling whether each element of 'x' occurs in 'sorted'
+sortfin <- function(sorted, x, ...)UseMethod("sortfin")
+# method selects the search algorithm (1=binary search per element,
+# 2=doubly exponential search, 3=merge); NULL picks one heuristically
+# from the sizes of 'x' and 'sorted' (see the Rd details above).
+sortfin.integer64 <- function(sorted, x, method=NULL, ...)
+{
+  n <- length(x)
+  if (is.null(method)){
+	if (n<2048){
+	  method <- 1L
+	}else if (n<length(sorted)/128){
+	  method <- 2L
+	}else{
+	  method <- 3L
+	}
+  }else method <- as.integer(method)
+  ret <- logical(n)
+  if (method==1L){
+	  # binary search handles 'x' in its given order; C fills 'ret' in place
+	  .Call(C_r_ram_integer64_sortfin_asc
+	  , x = as.integer64(x)
+	  , sorted = as.integer64(sorted)
+	  , method= method
+	  , ret = ret
+	  , PACKAGE = "bit64"
+	  )
+  }else{
+    # methods 2 and 3 require 'x' sorted; sort a clone and remember the
+    # permutation 'o' so results can be scattered back to input order
+    sx <- clone(as.integer64(x)); o <- seq_along(x); ramsortorder(sx, o, na.last=FALSE, ...)
+	ret[o] <- .Call(C_r_ram_integer64_sortfin_asc
+	  , x = sx
+	  , sorted = as.integer64(sorted)
+	  , method= method
+	  , ret = ret
+	  , PACKAGE = "bit64"
+	  )
+	  ret
+  }
+}
+
+# generic: logical vector telling whether each element of 'x' occurs in
+# 'table' when viewed through the integer permutation 'order'
+orderfin <- function(table, order, x, ...)UseMethod("orderfin")
+# method: 1=binary search, 2=doubly exponential search, 3=merge;
+# NULL chooses heuristically from the sizes of 'x' and 'table'.
+orderfin.integer64 <- function(table, order, x, method=NULL, ...)
+{
+  n <- length(x)
+  if (is.null(method)){
+	if (n<4096){
+	  method <- 1L
+	}else if (n<length(table)/8){
+	  method <- 2L
+	}else{
+	  method <- 3L
+	}
+  }else method <- as.integer(method)
+  ret <- logical(n)
+  if (method==1L){
+	  .Call(C_r_ram_integer64_orderfin_asc
+	  , x = as.integer64(x)
+	  , table = as.integer64(table)
+	  , order = as.integer(order)
+	  , method= as.integer(method)
+	  , ret = ret
+	  , PACKAGE = "bit64"
+	  )
+  }else{
+    # methods 2 and 3 need 'x' in sorted order; 'o' records the permutation
+    # so results can be scattered back to the original positions
+    o <- seq_along(x); ramorder(x, o, na.last=FALSE, ...)
+	ret[o] <- .Call(C_r_ram_integer64_orderfin_asc
+	  , x = x[o]
+	  , table = as.integer64(table)
+	  , order = as.integer(order)
+	  , method= as.integer(method)
+	  , ret = ret
+	  , PACKAGE = "bit64"
+	  )
+	  ret
+  }
+}
+
+
+# generic: positions of 'x' in 'table' (viewed through 'order'),
+# 'nomatch' for elements not found
+orderpos <- function(table, order, x, ...)UseMethod("orderpos")
+# method: 1=binary search, 2=doubly exponential search, 3=merge;
+# NULL chooses heuristically from the sizes of 'x' and 'table'.
+orderpos.integer64 <- function(table, order, x, nomatch=NA, method=NULL, ...)
+{
+  n <- length(x)
+  if (is.null(method)){
+	if (n<4096){
+	  method <- 1L
+	}else if (n<length(table)/8){
+	  method <- 2L
+	}else{
+	  method <- 3L
+	}
+  }else method <- as.integer(method)
+  ret <- integer(n);
+  if (method==1L){
+	  .Call(C_r_ram_integer64_orderpos_asc
+	  , x = as.integer64(x)
+	  , table = as.integer64(table)
+	  , order = as.integer(order)
+	  , nomatch = as.integer(nomatch)
+	  , method= as.integer(method)
+	  , ret = ret
+	  , PACKAGE = "bit64"
+	  )
+  }else{
+    # methods 2 and 3 need 'x' in sorted order; scatter results back via 'o'
+    o <- seq_along(x); ramorder(x, o, na.last=FALSE, ...)
+	ret[o] <- .Call(C_r_ram_integer64_orderpos_asc
+	  , x = x[o]
+	  , table = as.integer64(table)
+	  , order = as.integer(order)
+	  , nomatch = as.integer(nomatch)
+	  , method= as.integer(method)
+	  , ret = ret
+	  , PACKAGE = "bit64"
+	  )
+	  ret
+  }
+}
+
+# generic: positions of 'x' in the original data given its sorted copy
+# and the order permutation; 'nomatch' for elements not found
+sortorderpos <- function(sorted, order, x, ...)UseMethod("sortorderpos")
+# method: 1=binary search, 2=doubly exponential search, 3=merge;
+# NULL chooses heuristically from the sizes of 'x' and 'sorted'.
+sortorderpos.integer64 <- function(sorted, order, x, nomatch=NA, method=NULL, ...)
+{
+  n <- length(x)
+  if (is.null(method)){
+	if (n<2048){
+	  method <- 1L
+	}else if (n<length(sorted)/128){
+	  method <- 2L
+	}else{
+	  method <- 3L
+	}
+  }else method <- as.integer(method)
+  ret <- integer(n)
+  if (method==1L){
+	  .Call(C_r_ram_integer64_sortorderpos_asc
+	  , x = as.integer64(x)
+	  , sorted = as.integer64(sorted)
+	  , order = as.integer(order)
+	  , nomatch = as.integer(nomatch)
+	  , method= as.integer(method)
+	  , ret = ret
+	  , PACKAGE = "bit64"
+	  )
+  }else{
+    # methods 2 and 3 need 'x' sorted; sort a clone and scatter back via 'o'
+    sx <- clone(as.integer64(x)); o <- seq_along(x); ramsortorder(sx, o, na.last=FALSE, ...)
+	ret[o] <- .Call(C_r_ram_integer64_sortorderpos_asc
+	  , x = sx
+	  , sorted = as.integer64(sorted)
+	  , order = as.integer(order)
+	  , nomatch = as.integer(nomatch)
+	  , method= as.integer(method)
+	  , ret = ret
+	  , PACKAGE = "bit64"
+	  )
+	  ret
+  }
+}
+
+
+
+orderdup <- function(table, order, ...)UseMethod("orderdup")
+orderdup.integer64 <- function(table, order, method=NULL, ...)
+{
+  # Flag duplicated values of 'table' via its order permutation.
+  # method 2 routes the random writes through a smaller bit vector,
+  # which the heuristic prefers above 2^22 elements.
+  if (is.null(method))
+    method <- if (length(table) < 4194304) 1L else 2L
+  else
+    method <- as.integer(method)
+  out <- logical(length(table))
+  .Call(C_r_ram_integer64_orderdup_asc
+  , table = as.integer64(table)
+  , order = as.integer(order)
+  , method = method
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+
+sortorderdup <- function(sorted, order, ...)UseMethod("sortorderdup")
+sortorderdup.integer64 <- function(sorted, order, method=NULL, ...)
+{
+  # Flag duplicated values given a sorted copy and the order permutation.
+  # method 2 routes the random writes through a smaller bit vector,
+  # which the heuristic prefers above 2^22 elements.
+  if (is.null(method))
+    method <- if (length(sorted) < 4194304) 1L else 2L
+  else
+    method <- as.integer(method)
+  out <- logical(length(sorted))
+  .Call(C_r_ram_integer64_sortorderdup_asc
+  , sorted = as.integer64(sorted)
+  , order = as.integer(order)
+  , method = method
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+
+
+
+sortuni <- function(sorted, nunique, ...)UseMethod("sortuni")
+sortuni.integer64 <- function(sorted, nunique, ...)
+{
+  # Extract the 'nunique' distinct values from a sorted vector.
+  out <- integer64(nunique)
+  .Call(C_r_ram_integer64_sortuni_asc
+  , sorted = as.integer64(sorted)
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+orderuni <- function(table, order, nunique, ...)UseMethod("orderuni")
+orderuni.integer64 <- function(table, order, nunique, keep.order=FALSE, ...)
+{
+  # Unique values of 'table'; keep.order chooses between original and
+  # sorted result order (see the Rd details above).
+  out <- integer64(nunique)
+  .Call(C_r_ram_integer64_orderuni_asc
+  , table = as.integer64(table)
+  , order = as.integer(order)
+  , keep.order = as.logical(keep.order)
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+sortorderuni <- function(table, sorted, order, nunique, ...)UseMethod("sortorderuni")
+sortorderuni.integer64 <- function(table, sorted, order, nunique, ...)
+{
+  # Unique values computed from the sorted copy and its order permutation.
+  out <- integer64(nunique)
+  .Call(C_r_ram_integer64_sortorderuni_asc
+  , table = as.integer64(table)
+  , sorted = as.integer64(sorted)
+  , order = as.integer(order)
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+orderupo <- function(table, order, nunique, ...)UseMethod("orderupo")
+orderupo.integer64 <- function(table, order, nunique, keep.order=FALSE, ...)
+{
+  # Positions of the unique values of 'table'; keep.order chooses the
+  # result ordering (see the Rd details above).
+  out <- integer(nunique)
+  .Call(C_r_ram_integer64_orderupo_asc
+  , table = as.integer64(table)
+  , order = as.integer(order)
+  , keep.order = as.logical(keep.order)
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+sortorderupo <- function(sorted, order, nunique, keep.order=FALSE, ...)UseMethod("sortorderupo")
+sortorderupo.integer64 <- function(sorted, order, nunique, keep.order=FALSE, ...)
+{
+  # Positions of the unique values, from the sorted copy and its order.
+  # The .Call result is returned directly (no extra temp needed).
+  out <- integer(nunique)
+  .Call(C_r_ram_integer64_sortorderupo_asc
+  , sorted = as.integer64(sorted)
+  , order = as.integer(order)
+  , keep.order = as.logical(keep.order)
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+
+ordertie <- function(table, order, nties, ...)UseMethod("ordertie")
+ordertie.integer64 <- function(table, order, nties, ...)
+{
+  # Positions of the 'nties' tied values of 'table'.
+  out <- integer(nties)
+  .Call(C_r_ram_integer64_ordertie_asc
+  , table = as.integer64(table)
+  , order = as.integer(order)
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+sortordertie <- function(sorted, order, nties, ...)UseMethod("sortordertie")
+sortordertie.integer64 <- function(sorted, order, nties, ...)
+{
+  # Positions of the 'nties' tied values, from sorted copy and order.
+  out <- integer(nties)
+  .Call(C_r_ram_integer64_sortordertie_asc
+  , sorted = as.integer64(sorted)
+  , order = as.integer(order)
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+
+sorttab <- function(sorted, nunique, ...)UseMethod("sorttab")
+sorttab.integer64 <- function(sorted, nunique, ...)
+{
+  # Frequency counts of the 'nunique' distinct values in a sorted vector.
+  out <- integer(nunique)
+  .Call(C_r_ram_integer64_sorttab_asc
+  , sorted = as.integer64(sorted)
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+ordertab <- function(table, order, nunique, ...)UseMethod("ordertab")
+ordertab.integer64 <- function(table, order, nunique, denormalize=FALSE, keep.order=FALSE, ...)
+{
+  # Frequency counts of values in 'table'. Denormalized or original-order
+  # results need one slot per input element, otherwise one per unique value.
+  denormalize <- as.logical(denormalize)
+  keep.order <- as.logical(keep.order)
+  size <- if (denormalize || keep.order) length(table) else nunique
+  out <- integer(size)
+  .Call(C_r_ram_integer64_ordertab_asc
+  , table = as.integer64(table)
+  , order = as.integer(order)
+  , denormalize = denormalize
+  , keep.order = keep.order
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+sortordertab <- function(sorted, order, ...)UseMethod("sortordertab")
+sortordertab.integer64 <- function(sorted, order, denormalize=FALSE, ...)
+{
+  # Frequency counts from the sorted copy and its order permutation;
+  # the result always has one slot per input element.
+  out <- integer(length(sorted))
+  .Call(C_r_ram_integer64_sortordertab_asc
+  , sorted = as.integer64(sorted)
+  , order = as.integer(order)
+  , denormalize = as.logical(denormalize)
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+orderkey <- function(table, order, na.skip.num=0L, ...)UseMethod("orderkey")
+orderkey.integer64 <- function(table, order, na.skip.num=0L, ...)
+{
+  # Map each element of 'table' to the position of its value among the
+  # unique values; na.skip.num controls NA coding (see Rd details above).
+  out <- integer(length(table))
+  .Call(C_r_ram_integer64_orderkey_asc
+  , table = as.integer64(table)
+  , order = as.integer(order)
+  , na.skip.num = na.skip.num
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+sortorderkey <- function(sorted, order, na.skip.num=0L, ...)UseMethod("sortorderkey")
+sortorderkey.integer64 <- function(sorted, order, na.skip.num=0L, ...)
+{
+  # Like orderkey, but computed from the sorted copy plus permutation.
+  out <- integer(length(sorted))
+  .Call(C_r_ram_integer64_sortorderkey_asc
+  , sorted = as.integer64(sorted)
+  , order = as.integer(order)
+  , na.skip.num = na.skip.num
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+
+orderrnk <- function(table, order, na.count, ...)UseMethod("orderrnk")
+orderrnk.integer64 <- function(table, order, na.count, ...)
+{
+  # Ranks with ties averaged — hence a double result vector.
+  out <- double(length(table))
+  .Call(C_r_ram_integer64_orderrnk_asc
+  , table = as.integer64(table)
+  , order = as.integer(order)
+  , na.count = as.integer(na.count)
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+sortorderrnk <- function(sorted, order, na.count, ...)UseMethod("sortorderrnk")
+sortorderrnk.integer64 <- function(sorted, order, na.count, ...)
+{
+  # Ranks with ties averaged, from the sorted copy plus permutation.
+  out <- double(length(sorted))
+  .Call(C_r_ram_integer64_sortorderrnk_asc
+  , sorted = as.integer64(sorted)
+  , order = as.integer(order)
+  , na.count = as.integer(na.count)
+  , ret = out
+  , PACKAGE = "bit64"
+  )
+}
+
+
+sortqtl <- function(sorted, na.count, probs, ...) UseMethod("sortqtl")
+sortqtl.integer64 <- function(sorted, na.count, probs, ...){
+  # Quantiles from a sorted vector whose first na.count positions hold NAs;
+  # quantile positions are rounded to the nearest valid index.
+  nvalid <- length(sorted) - na.count
+  qtl <- sorted[na.count + round(1L + probs * (nvalid - 1L))]
+  # NA probs must yield NA until [.integer64 handles NA subscripts in C
+  qtl[is.na(probs)] <- NA
+  qtl
+}
+
+orderqtl <- function(table, order, na.count, probs, ...) UseMethod("orderqtl")
+orderqtl.integer64 <- function(table, order, na.count, probs, ...){
+  # Quantiles of unsorted 'table' using its order permutation; the first
+  # na.count positions of 'order' point at NAs and are skipped.
+  nvalid <- length(table) - na.count
+  qtl <- table[ order[na.count + round(1L + probs * (nvalid - 1L))] ]
+  # NA probs must yield NA until [.integer64 handles NA subscripts in C
+  qtl[is.na(probs)] <- NA
+  qtl
+}
diff --git a/R/zzz.R b/R/zzz.R
new file mode 100644
index 0000000..cca2c43
--- /dev/null
+++ b/R/zzz.R
@@ -0,0 +1,37 @@
+# /*
+# S3 atomic 64bit integers for R
+# (c) 2011 Jens Oehlschägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+# */
+
+# Nothing to do at load time: the shared library is loaded via useDynLib
+# in NAMESPACE (see the commented-out alternatives below).
+.onLoad <- function(lib, pkg) {
+  ##library.dynam("bit64", pkg, lib) use useDynLib(bit) in NAMESPACE instead
+  ##packageStartupMessage("Loading package bit64 ", packageDescription("bit64", fields="Version"))
+}
+
+# Print a short feature overview when the package is attached.
+.onAttach <- function(libname, pkgname){
+  packageStartupMessage("Attaching package bit64")
+  packageStartupMessage("package:bit64 (c) 2011-2012 Jens Oehlschlaegel")
+  packageStartupMessage("creators: integer64 seq :")
+  packageStartupMessage("coercion: as.integer64 as.vector as.logical as.integer as.double as.character as.bin")
+  packageStartupMessage("logical operator: ! & | xor != == < <= >= >")
+  packageStartupMessage("arithmetic operator: + - * / %/% %% ^")
+  packageStartupMessage("math: sign abs sqrt log log2 log10")
+  packageStartupMessage("math: floor ceiling trunc round")
+  # fixed mismatched bracket: "[is.atomic}" -> "[is.atomic]"
+  packageStartupMessage("querying: is.integer64 is.vector [is.atomic] [length] format print str")
+  packageStartupMessage("values: is.na is.nan is.finite is.infinite")
+  packageStartupMessage("aggregation: any all min max range sum prod")
+  packageStartupMessage("cumulation: diff cummin cummax cumsum cumprod")
+  packageStartupMessage("access: length<- [ [<- [[ [[<-")
+  packageStartupMessage("combine: c rep cbind rbind as.data.frame")
+  packageStartupMessage("WARNING don't use as subscripts")
+  packageStartupMessage("WARNING semantics differ from integer")
+  packageStartupMessage("for more help type ?bit64")
+}
+# Announce unload and release the package's shared library.
+# NOTE(review): packageStartupMessage at unload time is unusual — message()
+# would be the conventional choice; kept as-is to preserve behavior.
+.onUnload <- function(libpath){
+   packageStartupMessage("Unloading package bit64")
+   library.dynam.unload("bit64", libpath)
+}
diff --git a/data/benchmark64.data.rda b/data/benchmark64.data.rda
new file mode 100644
index 0000000..add1bf2
Binary files /dev/null and b/data/benchmark64.data.rda differ
diff --git a/data/optimizer64.data.rda b/data/optimizer64.data.rda
new file mode 100644
index 0000000..e13a665
Binary files /dev/null and b/data/optimizer64.data.rda differ
diff --git a/debian/README.source b/debian/README.source
deleted file mode 100644
index d964498..0000000
--- a/debian/README.source
+++ /dev/null
@@ -1,15 +0,0 @@
-Explanation for binary files inside source package according to
-  http://lists.debian.org/debian-devel/2013/09/msg00332.html
-
-Files: data/benchmark64.data.rda
-Documentation: man/benchmark64.data.rd
-   Results of performance measurement on a Core i7 Lenovo T410 8 GB RAM
-   under Windows 7 64bit
-
-Files: data/optimizer64.data.rda
-Documentation: man/optimizer64.data.rd
-    Results of performance measurement on a Core i7 Lenovo T410 8 GB RAM
-    under Windows 7 64bit
-
-
- -- Andreas Tille <tille at debian.org>  Sat, 26 Aug 2017 00:16:39 +0200
diff --git a/debian/changelog b/debian/changelog
deleted file mode 100644
index 412672a..0000000
--- a/debian/changelog
+++ /dev/null
@@ -1,5 +0,0 @@
-r-cran-bit64 (0.9-7-1) unstable; urgency=medium
-
-  * Initial release (closes: #873269)
-
- -- Andreas Tille <tille at debian.org>  Sat, 26 Aug 2017 00:16:39 +0200
diff --git a/debian/compat b/debian/compat
deleted file mode 100644
index f599e28..0000000
--- a/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-10
diff --git a/debian/control b/debian/control
deleted file mode 100644
index c5914a6..0000000
--- a/debian/control
+++ /dev/null
@@ -1,32 +0,0 @@
-Source: r-cran-bit64
-Maintainer: Debian Med Packaging Team <debian-med-packaging at lists.alioth.debian.org>
-Uploaders: Andreas Tille <tille at debian.org>
-Section: gnu-r
-Priority: optional
-Build-Depends: debhelper (>= 10),
-               dh-r,
-               r-base-dev,
-               r-cran-bit
-Standards-Version: 4.1.0
-Vcs-Browser: https://anonscm.debian.org/viewvc/debian-med/trunk/packages/R/r-cran-bit64/trunk/
-Vcs-Svn: svn://anonscm.debian.org/debian-med/trunk/packages/R/r-cran-bit64/trunk/
-Homepage: https://cran.r-project.org/package=bit64
-
-Package: r-cran-bit64
-Architecture: any
-Depends: ${R:Depends},
-         ${shlibs:Depends},
-         ${misc:Depends}
-Recommends: ${R:Recommends}
-Suggests: ${R:Suggests}
-Description: GNU R S3 Class for Vectors of 64bit Integers
- Package 'bit64' provides serializable S3 atomic 64bit (signed) integers.
- These are useful for handling database keys and exact counting in +-2^63.
- WARNING: do not use them as replacement for 32bit integers, integer64 are not
- supported for subscripting by R-core and they have different semantics when
- combined with double, e.g. integer64 + double => integer64.
- Class integer64 can be used in vectors, matrices, arrays and data.frames.
- Methods are available for coercion from and to logicals, integers, doubles,
- characters and factors as well as many elementwise and summary functions.
- Many fast algorithmic operations such as 'match' and 'order' support
- interactive data exploration and manipulation and optionally leverage caching.
diff --git a/debian/copyright b/debian/copyright
deleted file mode 100644
index 9b51f9f..0000000
--- a/debian/copyright
+++ /dev/null
@@ -1,16 +0,0 @@
-Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: bit64
-Upstream-Contact: Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
-Source: https://cran.r-project.org/package=bit64
-
-Files: *
-Copyright: Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
-License: GPL-2
-
-Files: debian/*
-Copyright: 2017 Andreas Tille <tille at debian.org>
-License: GPL-2
-
-License: GPL-2
- On Debian systems you can find the full text of the GNU General Public
- License at /usr/share/common-licenses/GPL-2.
diff --git a/debian/rules b/debian/rules
deleted file mode 100755
index e68d5e2..0000000
--- a/debian/rules
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/make -f
-
-%:
-	dh $@ --buildsystem R
-
-override_dh_fixperms:
-	dh_fixperms
-	find debian -name make_rd.pl -exec chmod -x \{\} \;
diff --git a/debian/source/format b/debian/source/format
deleted file mode 100644
index 163aaf8..0000000
--- a/debian/source/format
+++ /dev/null
@@ -1 +0,0 @@
-3.0 (quilt)
diff --git a/debian/watch b/debian/watch
deleted file mode 100644
index dbf2d2a..0000000
--- a/debian/watch
+++ /dev/null
@@ -1,2 +0,0 @@
-version=4
-https://cran.r-project.org/src/contrib/bit64_([-\d.]*)\.tar\.gz
diff --git a/exec/make_rd.pl b/exec/make_rd.pl
new file mode 100644
index 0000000..0316f63
--- /dev/null
+++ b/exec/make_rd.pl
@@ -0,0 +1,33 @@
+# Reads standard input line by line and writes out all lines that begin
+# with "#!". The output is split into several output files as follows:
+# every time a line of the format "#! \name{<name>}" is encountered, a
+# file named "<name>.rd" is created and subsequent output is written to
+# it (until the next line of this format is found). Thus, the first line
+# beginning with "#!" must be of this type, because otherwise the script
+# would not know where to write the output.
+
+my $out;    # lexical filehandle for the current .rd file (undef until first \name{})
+while (<STDIN>)
+{
+    my $line = $_;
+    if ( $line =~ /^#! ?(.*)/ )
+    {
+        $line = $1;
+        if ( $line =~ /\\name\{(.*)\}/ )
+        {
+            my $f = $1;
+            # finish the previous file before starting a new one
+            close($out) if $out;
+            # three-argument open with a lexical handle, error-checked
+            open($out, '>', "$f.rd") or die "cannot write $f.rd: $!";
+        }
+        # as before, doc lines seen prior to the first \name{} are dropped
+        print $out $line . "\n" if $out;
+    }
+}
+# avoid closing a never-opened handle when the input had no doc lines
+close($out) if $out;
diff --git a/exec/prebuild.sh b/exec/prebuild.sh
new file mode 100644
index 0000000..891a67e
--- /dev/null
+++ b/exec/prebuild.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+# Produce the Rd-files for the documentation from the R source files
+# 
+# Prerequisites:
+#   - Perl
+#   - R_HOME must be set to the directory where R is installed
+
+echo "#### starting prebuild.sh"
+
+# run from the package root so the man/ directory lands next to R/
+cd ..
+mkdir -p man
+cd man
+# concatenate all R sources and extract their '#!' doc lines into man/*.rd
+find ../R -name '*.[rR]' -exec cat \{\} \; | perl ../exec/make_rd.pl 
+cd ../exec
+
+echo "#### prebuild.sh completed!"
diff --git a/inst/ANNOUNCEMENT-0.8.txt b/inst/ANNOUNCEMENT-0.8.txt
new file mode 100644
index 0000000..6ac8a3a
--- /dev/null
+++ b/inst/ANNOUNCEMENT-0.8.txt
@@ -0,0 +1,26 @@
+Dear R-Core team,
+Dear Rcpp team and other package teams,
+Dear R users,
+
+The new package 'bit64' is available on CRAN for beta-testing and code-reviewing.
+
+Package 'bit64' provides fast serializable S3 atomic 64bit (signed) integers that can be used in vectors, matrices, arrays and data.frames. Methods are available for coercion from and to logicals, integers, doubles, characters as well as many elementwise and summary functions. 
+
+Package 'bit64' has the following advantages over package 'int64' (which was sponsored by Google):
+- true atomic vectors usable with length, dim, names etc.
+- only S3, not S4 class system used to dispatch methods
+- less RAM consumption by factor 7 (under 64 bit OS)
+- faster operations by factor 4 to 2000 (under 64 bit OS)
+- no slow-down of R's garbage collection (as caused by the pure existence of 'int64' objects)
+- pure GPL, no copyrights from transnational commercial company
+
+While the advantage of the atomic S3 design over the complicated S4 object design is obvious, it is less obvious that an external package is the best way to enrich R with 64bit integers. An external package will not give us literals such as 1LL or directly allow us to address larger vectors than possible with base R. But it allows us to properly address larger vectors in other packages such as 'ff' or 'bigmemory' and it allows us to properly work with large surrogate keys from external d [...]
+
+For those who still hope that R's 'integer' will be 64bit some day, here is my key learning: migrating R's 'integer' from 32 to 64 bit would be RAM expensive. It would most likely require to also migrate R's 'double' from 64 to 128 bit - in order to again have a data type to which we can lossless coerce. The assumption that 'integer' is a proper subset of 'double' is scattered over R's semantics. We all expect that binary and n-ary functions such as '+' and 'c' do return 'double' and do  [...]
+
+Since this package is 'at risk' to create a lot of dependencies from other packages, I'd appreciate serious  beta-testing and also code-review from the R-Core team. Please check the 'Limitations' sections at the help page and the numerics involving "long double" in C. If the conclusion is that this should be better done in Base R - I happly donate the code and drop this package. If we have to go with an external package for 64bit integers, it would be great if this work could convince th [...]
+
+Best regards
+
+Jens Oehlschlägel
+Munich, 11.2.2012
diff --git a/inst/ANNOUNCEMENT-0.9-Details.txt b/inst/ANNOUNCEMENT-0.9-Details.txt
new file mode 100644
index 0000000..5fc7167
--- /dev/null
+++ b/inst/ANNOUNCEMENT-0.9-Details.txt
@@ -0,0 +1 @@
+I have used package 'bit64' as a testbed to explore a couple of approaches for implementing R's univariate algorithmic functionality efficiently. I have focused on single-threaded efficiency for two reasons: 1) Amdahl's law dictates that the more we parallelize, the more we depend on serial efficiency. 2) When working with truly big data it is not only absolute speed but also energy consumption that we care about. 

Under the hood package 'bit64' has multiple implementations of the same functionality, and high-level functions contain (yet simple heuristic) optimizers that choose among the available low-level functions. For example 'match' can choose between eight functions based on hashing or sorting/ordering. 

+Function 'match' (and '%in%') has been accelerated by complementing lookup of 'x' in hashed 'table' by reverse lookup of 'table' in hashed 'x'. If 'x' is small and 'table' is big, reverse lookup avoids the cost of building a huge hashmap. As suggested in Simon Urbanek's package 'fastmatch', if 'match' is called multiple times with the same 'table', performance can be improved by re-using the hashmap implicitly built by 'match'. Beyond that, I have realized a couple of improvements:

+1) Building the hashmap has now been singled out in a separate function 'hashmap' that explicitly returns an environment of class c("cache_integer64", "cache", "environment") containing the hashmap and some auxiliary data. 

+2) Instead of implicitly caching the hashmap as a side-effect when calling 'fastmatch', there are explicit functions for caching, for example 'hashcache' for attaching a cache with a hashmap, and 'remcache' for removing any cached data. 

3) If the 'hashcache' function after hashing discovers that the number of unique values is much smaller than the total number of values, it will hash again using a much smaller hashmap: this typically saves a lot of RAM and accelerates usage of the hashmap because it reduces random access.

4) The cache layer has a mechanism for detecting outdated caches. This is even more important in the case of a cached hashmap, since R's typical hashmap only contains index pointers to the data, not the data itself (unlike in standard hashtables). As a result, an outdated cache might lead to a crash, if the data has changed since creation of the cached hashmap. The detection mechanism comes for free, since R does Copy-on-write and each change of a vector leads to memory reallocation: on e [...]

5) Beyond 'match', the package leverages speed gains of hashing or cached hashing for a couple of other high-level functions: '%in%', 'duplicated', 'unique', 'unipos' and 'table'. However, it turned out that only 'match', '%in%' and 'duplicated' benefit from a cached hashmap. For 'unique', 'unipos' and 'table' the cost of traversing an existing hashmap is as high as creating the hashmap from scratch. That leads to the undesireable effect that we need two implementations for each of these  [...]

6) Beyond leveraging hashing, all these high-level functions also have two low-level implementations that take advantage of (cached) ordering and (cached) sortordering instead (see order below). 

7) Additional functions are implemented that benefit only from (cached) ordering and (cached) sortordering: 'sort', 'order', 'tiepos', 'keypos', 'rank', 'quantile' and dependants thereof ('median','summary','as.factor','as.ordered','table'). 

Method 'sort' is a cache-aware wrapper around 'ramsort', which depending on context chooses from multiple sorting algorithms (or from the cache): 'shellsort' (R's traditional inplace sorting algorithm), 'quicksort' (faster inplace), 'mergesort' (fast and stable), 'radixsort' (stable with linear scalability, for large datasets). The quicksort algorithm implemented here is in this context faster than the famous one of Bentley and McIllroy. It uses median of three random pivots and is like i [...]

Function 'order.integer64' with option 'optimize = "memory"' calls 'ramorder' which chooses from a similar set of low-level algorithms. 'ramorder' - like in package 'ff' - is faster than ordering in Base R, but like 'order' in Base R still does the job by sorting index pointers to the data which creates heavy random access to the data. The novel 'ramsortorder' method realizes ordering close to the speed of sorting, by sorting index and data simultaneously and thereby avoiding heavy random [...]

Function 'rank.integer64' implements only 'ties.method = "average"' and 'na.last="keep"' (the only sensible default, see e.g. 'cor'). Function 'prank.integer64'  projects the values [min..max] via ranks [1..n] to [0..1]. 'qtile.integer64' is the inverse function of 'prank.integer64' and projects [0..1] to [min..max]. 'quantile.integer64' with 'type=0' and 'median.integer64' are convenience wrappers to 'qtile'. 'qtile' behaves very similar to 'quantile.default' with 'type=1' in that it onl [...]

Function 'table.integer64' leverages hashing or sorting for counting frequencies of all unique values. This is by factor 3 slower than 'tabulate', but when called with 'return="list"' is by order of magnitude faster than 'table' (because 'table' wastes a lot of performance in large scale raw data manipulation before calling tabulate and in attaching the unique values as 'names' which loads heavy on the global string cache). When dealing with combinations of input vectors, 'table.integer64 [...]

I compared the speed gains of hashing+usage versus sortordering+usage over a couple of univariate algorithmic operations: hashing and sortordering are competitive, with hashing rather winning for smaller and sortordering rather winning for larger vectors (due to better cache-obliviousness of sorting). The third option - ordering - is much slower, though competitive with Base R, and 50% RAM saving makes this an interesting option, especially when working with datasets close to the RAM limi [...]
- sortordering supports more functionality than hashing
- sortordering gives better modularity (different from hashing, we can well separate *creating* and *using* the sortordering, because sorting permanently improves cache-locality)
- without computational costs of keeping the original order ('keep.order=TRUE' in 'unique' and 'table'), sortorder gives sorted results while hashing gives random result order. If there are many unique values, fixing random order by sorting afterwards kills any performance benefit of hashing, compare for example the sequence {y <- unique(x); ind <- sort.list(y)} in 'factor'.
- sorting better generalizes to very large data on disk compared to hashing
- it is easier to lockfree parallelize sorting compared to hashing
- creating the ordering quickly via sortordering and then caching only ordering (without the sorted data) is an interesting option to save RAM without too much speed loss
- with ordering instead of sortordering there is an option to work with large borderline-sized datasets in-RAM 

These advantages of sorting over hashing are good news for my novel energy-efficient greeNsort® algorithms.

The long term roadmap for packages 'bit64' and 'ff' is
- demonstrate power of greeNsort® by accelerating integer64 sorting by yet another factor 2
- parallelization of important functions in bit64
- unifying the sort capabilities in ff with those in bit64 (logical, factor, integer, integer64, double)
- generalizing the fast data management to all numeric data types (integer, integer64, double)
- removing the 2^31-1 address limit in ff (rather using integer64 than double)
- providing ff with proper disk sorting (reducing n*log(n) passes to 2 passes over the memory-mapped disk)

© 2010-2012 Jens Oehlschlägel
\ No newline at end of file
diff --git a/inst/ANNOUNCEMENT-0.9.txt b/inst/ANNOUNCEMENT-0.9.txt
new file mode 100644
index 0000000..36568a1
--- /dev/null
+++ b/inst/ANNOUNCEMENT-0.9.txt
@@ -0,0 +1,11 @@
+Dear R community,
+
+The new version of package 'bit64' - which extends R with fast 64-bit integers - now has fast (single-threaded) implementations of the most important univariate algorithmic operations (those based on hashing and sorting). Package 'bit64' now has methods for 'match', '%in%', 'duplicated', 'unique', 'table', 'sort', 'order', 'rank', 'quantile', 'median' and 'summary'. Regarding data management it has novel generics 'unipos' (positions of the unique values), 'tiepos' (positions of ties), 'k [...]
+
+Since the package covers the most important functions for (univariate) data exploration and data management, I think it is now appropriate to claim that R has sound 64-bit integer support, for example for working with keys or counts imported from large databases. For details concerning approach, implementation and roadmap please check the ANNOUNCEMENT-0.9-Details.txt file and the package help files.
+
+Kind regards
+
+
+Jens Oehlschlägel
+Munich, 22.10.2012
diff --git a/inst/README_devel.txt b/inst/README_devel.txt
new file mode 100644
index 0000000..d5c70a9
--- /dev/null
+++ b/inst/README_devel.txt
@@ -0,0 +1,10 @@
+Naming conventions
+==================
+R/*.R 		R   files (including .Rd comments)
+src/*.c		C   files 
+man/*.Rd	Automatically generated Rd. files, do not modify
+
+Rd api
+======
+prebuild.sh		call manually for generating all .Rd files from the .Rd comments in the R files with the help of
+exec/make_rd.pl		converts "#! lines" in R/*.R files into man/<name>.Rd files, where <name> is derived from the "#! \name{<name>}" in the first line
diff --git a/man/as.character.integer64.rd b/man/as.character.integer64.rd
new file mode 100644
index 0000000..4a85d9c
--- /dev/null
+++ b/man/as.character.integer64.rd
@@ -0,0 +1,47 @@
+\name{as.character.integer64}
+\alias{as.character.integer64}
+\alias{as.double.integer64}
+\alias{as.integer.integer64}
+\alias{as.logical.integer64}
+\alias{as.bitstring}
+\alias{as.bitstring.integer64}
+\alias{as.factor.integer64}
+\alias{as.ordered.integer64}
+\title{
+   Coerce from integer64
+}
+\description{
+  Methods to coerce integer64 to other atomic types. 
+  'as.bitstring' coerces to a human-readable bit representation (strings of zeroes and ones). 
+  The methods \code{\link{format}}, \code{\link{as.character}}, \code{\link{as.double}},
+  \code{\link{as.logical}}, \code{\link{as.integer}} do what you would expect.
+}
+\usage{
+ as.bitstring(x, \dots)
+ \method{as.bitstring}{integer64}(x, \dots)
+ \method{as.character}{integer64}(x, \dots)
+ \method{as.double}{integer64}(x, keep.names = FALSE, \dots)
+ \method{as.integer}{integer64}(x, \dots)
+ \method{as.logical}{integer64}(x, \dots)
+ \method{as.factor}{integer64}(x)
+ \method{as.ordered}{integer64}(x)
+}
+\arguments{
+  \item{x}{ an integer64 vector }
+  \item{keep.names}{ FALSE, set to TRUE to keep a names vector }
+  \item{\dots}{ further arguments to the \code{\link{NextMethod}} }
+}
+\value{
+  \code{as.bitstring} returns a string of zeroes and ones. \cr
+  The other methods return atomic vectors of the expected types
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ classes }
+\keyword{ manip }
+\seealso{ \code{\link{as.integer64.character}} \code{\link{integer64}}  }
+\examples{
+  as.character(lim.integer64())
+  as.bitstring(lim.integer64())
+}
diff --git a/man/as.data.frame.integer64.rd b/man/as.data.frame.integer64.rd
new file mode 100644
index 0000000..4ed29d7
--- /dev/null
+++ b/man/as.data.frame.integer64.rd
@@ -0,0 +1,37 @@
+\name{as.data.frame.integer64}
+\alias{as.data.frame.integer64}
+\title{
+   integer64: Coercing to data.frame column
+}
+\description{
+  Coercing integer64 vector to data.frame.
+}
+\usage{
+  \method{as.data.frame}{integer64}(x, \dots)
+}
+\arguments{
+  \item{x}{ an integer64 vector }
+  \item{\dots}{ passed to NextMethod \code{\link{as.data.frame}} after removing the 'integer64' class attribute }
+}
+\value{
+  a one-column data.frame containing an integer64 vector
+}
+\details{
+  'as.data.frame.integer64' is not intended to be called directly,
+  but it is required to allow integer64 as data.frame columns.
+}
+\note{
+  This is currently very slow -- any ideas for improvement?
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ classes }
+\keyword{ manip }
+\seealso{ 
+  \code{\link{cbind.integer64}} \code{\link{integer64}}  %as.vector.integer64 removed as requested by the CRAN maintainer \code{\link{as.vector.integer64}} 
+}
+\examples{
+  as.data.frame.integer64(as.integer64(1:12))
+  data.frame(a=1:12, b=as.integer64(1:12))
+}
diff --git a/man/as.integer64.character.rd b/man/as.integer64.character.rd
new file mode 100644
index 0000000..6c1a77b
--- /dev/null
+++ b/man/as.integer64.character.rd
@@ -0,0 +1,48 @@
+\name{as.integer64.character}
+\alias{as.integer64}
+\alias{as.integer64.integer64}
+\alias{as.integer64.NULL}
+\alias{as.integer64.character}
+\alias{as.integer64.double}
+\alias{as.integer64.integer}
+\alias{as.integer64.logical}
+\alias{as.integer64.factor}
+\alias{NA_integer64_}
+\title{
+   Coerce to integer64
+}
+\description{
+  Methods to coerce from other atomic types to integer64. 
+}
+\usage{
+ NA_integer64_
+ as.integer64(x, \dots)
+ \method{as.integer64}{integer64}(x, \dots)
+ \method{as.integer64}{NULL}(x, \dots)
+ \method{as.integer64}{character}(x, \dots)
+ \method{as.integer64}{double}(x, keep.names = FALSE, \dots)
+ \method{as.integer64}{integer}(x, \dots)
+ \method{as.integer64}{logical}(x, \dots)
+ \method{as.integer64}{factor}(x, \dots)
+}
+\arguments{
+  \item{x}{ an atomic vector }
+  \item{keep.names}{ FALSE, set to TRUE to keep a names vector }
+  \item{\dots}{ further arguments to the \code{\link{NextMethod}} }
+}
+\details{
+  \code{as.integer64.character} is realized using C function \code{strtoll} which does not support scientific notation. 
+  Instead of '1e6' use '1000000'.
+}
+\value{
+  The other methods return atomic vectors of the expected types
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ classes }
+\keyword{ manip }
+\seealso{ \code{\link{as.character.integer64}} \code{\link{integer64}}  }
+\examples{
+  as.integer64(as.character(lim.integer64()))
+}
diff --git a/man/benchmark64.data.rd b/man/benchmark64.data.rd
new file mode 100644
index 0000000..7844c98
--- /dev/null
+++ b/man/benchmark64.data.rd
@@ -0,0 +1,39 @@
+\name{benchmark64.data}
+\alias{benchmark64.data}
+\docType{data}
+\title{
+ Results of performance measurement on a Core i7 Lenovo T410 8 GB RAM under Windows 7 64bit
+}
+\description{
+  These are the results of calling \code{\link{benchmark64}}
+}
+\usage{data(benchmark64.data)}
+\format{
+  The format is:
+ num [1:16, 1:6] 2.55e-05 2.37 2.39 1.28 1.39 ...
+ - attr(*, "dimnames")=List of 2
+  ..$ : chr [1:16] "cache" "match(s,b)" "s \%in\% b" "match(b,s)" ...
+  ..$ : chr [1:6] "32-bit" "64-bit" "hashcache" "sortordercache" ...
+}
+\examples{
+data(benchmark64.data)
+print(benchmark64.data)
+matplot(log2(benchmark64.data[-1,1]/benchmark64.data[-1,])
+, pch=c("3", "6", "h", "s", "o", "a")
+, xlab="tasks [last=session]"
+, ylab="log2(relative speed) [bigger is better]"
+)
+matplot(t(log2(benchmark64.data[-1,1]/benchmark64.data[-1,]))
+, axes=FALSE
+, type="b"
+, lwd=c(rep(1, 14), 3)
+, xlab="context"
+, ylab="log2(relative speed) [bigger is better]"
+)
+axis(1
+, labels=c("32-bit", "64-bit", "hash", "sortorder", "order", "hash+sortorder")
+, at=1:6
+)
+axis(2)
+}
+\keyword{datasets}
diff --git a/man/benchmark64.rd b/man/benchmark64.rd
new file mode 100644
index 0000000..07728d4
--- /dev/null
+++ b/man/benchmark64.rd
@@ -0,0 +1,133 @@
+\name{benchmark64}
+\alias{benchmark64}
+\alias{optimizer64}
+\title{
+ Function for measuring algorithmic performance \cr 
+ of high-level and low-level integer64 functions
+}
+\description{
+ \code{benchmark64} compares high-level integer64 functions against the integer functions from Base R \cr
+ \code{optimizer64} compares for each high-level integer64 function the Base R integer function with several low-level integer64 functions with and without caching \cr
+}
+\usage{
+benchmark64(nsmall = 2^16, nbig = 2^25, timefun = repeat.time
+)
+optimizer64(nsmall = 2^16, nbig = 2^25, timefun = repeat.time
+, what = c("match", "\%in\%", "duplicated", "unique", "unipos", "table", "rank", "quantile")
+, uniorder = c("original", "values", "any")
+, taborder = c("values", "counts")
+, plot = TRUE
+)
+}
+\arguments{
+  \item{nsmall}{ size of smaller vector }
+  \item{nbig}{ size of the bigger vector }
+  \item{timefun}{ a function for timing such as \code{\link[bit]{repeat.time}} or \code{\link{system.time}} }
+  \item{what}{
+ a vector of names of high-level functions
+}
+  \item{uniorder}{
+ one of the order parameters that are allowed in \code{\link{unique.integer64}} and \code{\link{unipos.integer64}}
+}
+  \item{taborder}{
+ one of the order parameters that are allowed in \code{\link{table.integer64}} 
+}
+  \item{plot}{
+ set to FALSE to suppress plotting 
+}
+}
+\details{
+ \code{benchmark64} compares the following scenarios for the following use cases: 
+ \tabular{rl}{
+  \bold{scenario name} \tab \bold{explanation} \cr
+  32-bit  \tab applying Base R function to 32-bit integer data \cr
+  64-bit \tab applying bit64 function to 64-bit integer data (with no cache) \cr
+  hashcache \tab ditto when cache contains \code{\link{hashmap}}, see \code{\link{hashcache}} \cr
+  sortordercache \tab ditto when cache contains sorting and ordering, see \code{\link{sortordercache}} \cr
+  ordercache \tab ditto when cache contains ordering only, see \code{\link{ordercache}} \cr
+  allcache \tab ditto when cache contains sorting, ordering and hashing \cr
+ }
+ \tabular{rl}{
+  \bold{use case name} \tab \bold{explanation} \cr
+  cache         \tab filling the cache according to scenario \cr
+  match(s,b)    \tab match small in big vector \cr
+  s \%in\% b      \tab small \%in\% big vector \cr
+  match(b,s)    \tab match big in small vector \cr
+  b \%in\% s      \tab big \%in\% small vector \cr
+  match(b,b)    \tab match big in (different) big vector \cr
+  b \%in\% b      \tab big \%in\% (different) big vector \cr
+  duplicated(b) \tab duplicated of big vector \cr
+  unique(b)     \tab unique of big vector \cr
+  table(b)      \tab table of big vector \cr
+  sort(b)       \tab sorting of big vector \cr
+  order(b)      \tab ordering of big vector \cr
+  rank(b)       \tab ranking of big vector \cr
+  quantile(b)   \tab quantiles of big vector \cr
+  summary(b)    \tab summary of big vector \cr
+  SESSION       \tab exemplary session involving multiple calls (including cache filling costs) \cr
+ }
+ Note that the timings for the cached variants do \emph{not} contain the time costs of building the cache, except for the timing of the exemplary user session, where the cache costs are included in order to evaluate amortization. 
+}
+\value{
+ \code{benchmark64} returns a matrix with elapsed seconds, different high-level tasks in rows and different scenarios to solve the task in columns. The last row named 'SESSION' contains the elapsed seconds of the exemplary session.
+ \cr
+ \code{optimizer64} returns a dimensioned list with one row for each high-level function timed and two columns named after the values of the \code{nsmall} and \code{nbig} sample sizes. Each list cell contains a matrix with timings, low-level-methods in rows and three measurements \code{c("prep","both","use")} in columns. If it can be measured separately, \code{prep} contains the timing of preparatory work such as sorting and hashing, and \code{use} contains the timing of using the prepar [...]
+}
+\author{
+ Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\seealso{
+ \code{\link{integer64}}
+}
+\examples{
+message("this small example using system.time does not give serious timings\n
+this we do this only to run regression tests")
+benchmark64(nsmall=2^7, nbig=2^13, timefun=function(expr)system.time(expr, gcFirst=FALSE))
+optimizer64(nsmall=2^7, nbig=2^13, timefun=function(expr)system.time(expr, gcFirst=FALSE)
+, plot=FALSE
+)
+\dontrun{
+message("for real measurement of sufficiently large datasets run this on your machine")
+benchmark64()
+optimizer64()
+}
+message("let's look at the performance results on Core i7 Lenovo T410 with 8 GB RAM")
+data(benchmark64.data)
+print(benchmark64.data)
+
+matplot(log2(benchmark64.data[-1,1]/benchmark64.data[-1,])
+, pch=c("3", "6", "h", "s", "o", "a") 
+, xlab="tasks [last=session]"
+, ylab="log2(relative speed) [bigger is better]"
+)
+matplot(t(log2(benchmark64.data[-1,1]/benchmark64.data[-1,]))
+, type="b", axes=FALSE 
+, lwd=c(rep(1, 14), 3)
+, xlab="context"
+, ylab="log2(relative speed) [bigger is better]"
+)
+axis(1
+, labels=c("32-bit", "64-bit", "hash", "sortorder", "order", "hash+sortorder")
+, at=1:6
+)
+axis(2)
+data(optimizer64.data)
+print(optimizer64.data)
+oldpar <- par(no.readonly = TRUE)
+par(mfrow=c(2,1))
+par(cex=0.7)
+for (i in 1:nrow(optimizer64.data)){
+ for (j in 1:2){
+   tim <- optimizer64.data[[i,j]]
+  barplot(t(tim))
+  if (rownames(optimizer64.data)[i]=="match")
+   title(paste("match", colnames(optimizer64.data)[j], "in", colnames(optimizer64.data)[3-j]))
+  else if (rownames(optimizer64.data)[i]=="\%in\%")
+   title(paste(colnames(optimizer64.data)[j], "\%in\%", colnames(optimizer64.data)[3-j]))
+  else
+   title(paste(rownames(optimizer64.data)[i], colnames(optimizer64.data)[j]))
+ }
+}
+par(mfrow=c(1,1))
+}
+\keyword{ misc }
diff --git a/man/bit64-package.rd b/man/bit64-package.rd
new file mode 100644
index 0000000..8ebfc8a
--- /dev/null
+++ b/man/bit64-package.rd
@@ -0,0 +1,892 @@
+\name{bit64-package}
+\alias{bit64-package}
+\alias{bit64}
+\alias{integer64}
+\alias{is.integer64}
+\alias{is.integer.integer64}
+\alias{is.vector.integer64}
+%as.vector.integer64 removed as requested by the CRAN maintainer \alias{as.vector.integer64}
+\alias{length<-.integer64}
+\alias{print.integer64}
+\alias{str.integer64}
+\docType{package}
+\title{
+   A S3 class for vectors of 64bit integers
+}
+\description{
+Package 'bit64' provides fast serializable S3 atomic 64bit (signed) integers 
+that can be used in vectors, matrices, arrays and data.frames. Methods are 
+available for coercion from and to logicals, integers, doubles, characters  
+and factors as well as many elementwise and summary functions. 
+\cr
+\bold{Version 0.8}
+With 'integer64' vectors you can store very large integers at the expense
+of 64 bits, which is by factor 7 better than 'int64' from package 'int64'.
+Due to the smaller memory footprint, the atomic vector architecture and  
+using only S3 instead of S4 classes, most operations are one to three orders 
+of magnitude faster: Example speedups are 4x for serialization, 250x for 
+adding, 900x for coercion and 2000x for object creation. Also 'integer64' 
+avoids an ongoing (potentially infinite) penalty for garbage collection
+observed during existence of 'int64' objects (see code in example section). 
+\cr
+\bold{Version 0.9}
+Package 'bit64' - which extends R with fast 64-bit integers - now has fast
+(single-threaded) implementations of the most important univariate algorithmic 
+operations (those based on hashing and sorting). We now have methods for 
+'match', '%in%', 'duplicated', 'unique', 'table', 'sort', 'order', 'rank', 
+'quantile', 'median' and 'summary'. Regarding data management we also have 
+novel generics 'unipos' (positions of the unique values), 'tiepos' (
+positions of ties), 'keypos' (positions of foreign keys in a sorted 
+dimension table) and derived methods 'as.factor' and 'as.ordered'. This 64-
+bit functionality is implemented carefully to be not slower than the 
+respective 32-bit operations in Base R and also to avoid outlying waiting 
+times observed with 'order', 'rank' and 'table' (speedup factors 20/16/200 
+respectively). This increases the dataset size with which we can work truly 
+interactively. The speed is achieved by simple heuristic optimizers in high-
+level functions choosing the best from multiple low-level algorithms and 
+further taking advantage of a novel caching if activated. In an example R 
+session using a couple of these operations the 64-bit integers performed 22x
+ faster than base 32-bit integers, hash-caching improved this to 24x, 
+sortorder-caching was most efficient with 38x (caching hashing and sorting 
+is not worth it with 32x at duplicated RAM consumption).
+}
+\usage{
+ integer64(length)
+ \method{is}{integer64}(x)
+ \method{length}{integer64}(x) <- value
+ \method{print}{integer64}(x, quote=FALSE, \dots)
+ \method{str}{integer64}(object, vec.len  = strO$vec.len, give.head = TRUE, give.length = give.head, \dots)
+}
+\arguments{
+  \item{length}{ length of vector using \code{\link{integer}} }
+  \item{x}{ an integer64 vector }
+  \item{object}{ an integer64 vector }
+  \item{value}{ an integer64 vector of values to be assigned }
+  \item{quote}{ logical, indicating whether or not strings should be printed with surrounding quotes. }
+  \item{vec.len}{ see \code{\link[utils]{str}} }
+  \item{give.head}{ see \code{\link[utils]{str}} }
+  \item{give.length}{ see \code{\link[utils]{str}} }
+  \item{\dots}{ further arguments to the \code{\link{NextMethod}} }
+}
+\details{
+\tabular{ll}{
+   Package: \tab bit64\cr
+   Type: \tab Package\cr
+   Version: \tab 0.5.0\cr
+   Date: \tab 2011-12-12\cr
+   License: \tab GPL-2\cr
+   LazyLoad: \tab yes\cr
+   Encoding: \tab latin1\cr
+}
+}
+\section{Design considerations}{
+  64 bit integers are related to big data: we need them to overcome address space limitations. 
+  Therefore performance of the 64 bit integer type is critical. 
+  In the S language -- designed in 1975 -- atomic objects were defined to be vectors for a couple of good reasons:
+  simplicity, option for implicit parallelization, good cache locality. 
+  In recent years many analytical databases have learnt that lesson: column-based databases provide superior performance
+  for many applications, the result are products such as MonetDB, Sybase IQ, Vertica, Exasol, Ingres Vectorwise.
+  If we introduce 64 bit integers not natively in Base R but as an external package, we should at least strive to 
+  make them as 'basic' as possible. Therefore the design choice of bit64 not only differs from \code{int64}, it is obvious: 
+  Like the other atomic types in Base R, we model data type 'integer64' as a contiguous \code{\link{atomic}} vector in memory, 
+  and we use the more basic \code{\link{S3}} class system, not \code{\link{S4}}. Like package \code{int64} we want our 'integer64' to be \code{\link{serialize}able}, 
+  therefore we also use an existing data type as the basis. Again the choice is obvious: R has only one 64 bit data type: doubles.
+  By using \code{\link{double}s}, \code{integer64} \code{\link{inherits}} some functionality such as \code{\link{is.atomic}}, \code{\link{length}}, 
+  \code{\link{length<-}}, \code{\link{names}}, \code{\link{names<-}}, \code{\link{dim}}, \code{\link{dim<-}}, \code{\link{dimnames}}, \code{\link{dimnames}}.
+  \cr
+  Our R level functions strictly follow the functional programming paradigm: 
+  no modification of arguments or other side effects. Before version 0.93  we internally deviated from the strict paradigm
+  in order to boost performance. Our C functions do not create new return values, 
+  instead we pass-in the memory to be returned as an argument. This gives us the freedom to apply the C-function 
+  to new or old vectors, which helps to avoid unnecessary memory allocation, unnecessary copying and unnecessary garbage collection.
+  Prior to 0.93 \emph{within} our R functions we also deviated from conventional R programming by not using \code{\link{attr<-}} and \code{\link{attributes<-}} 
+  because they always did new memory allocation and copying in older R versions. If we wanted to set attributes of return values that we have freshly created,
+  we instead used functions \code{\link[bit]{setattr}} and \code{\link[bit]{setattributes}} from package \code{\link[bit]{bit}}. 
+  From version 0.93 \code{\link[bit]{setattr}} is only used for manipulating \code{\link{cache}} objects, in \code{\link{ramsort.integer64}} and \code{\link{sort.integer64}} and in \code{\link{as.data.frame.integer64}}.
+}
+\section{Arithmetic precision and coercion}{
+  The fact that we introduce 64 bit long long integers -- without introducing 128-bit long doubles -- creates some subtle challenges:
+  Unlike 32 bit \code{\link{integer}s}, the \code{integer64} are no longer a proper subset of \code{\link{double}}. 
+  If a binary arithmetic operation does involve a \code{double} and a \code{integer}, it is a no-brainer to return \code{double} 
+  without loss of information. If an \code{integer64} meets a \code{double}, it is not trivial what type to return. 
+  Switching to \code{integer64} limits our ability to represent very large numbers, switching to \code{double} limits our ability 
+  to distinguish \code{x} from \code{x+1}. Since the latter is the purpose of introducing 64 bit integers, we usually return \code{integer64} 
+  from functions involving \code{integer64}, for example in \code{\link[=c.integer64]{c}}, \code{\link[=cbind.integer64]{cbind}} 
+  and \code{\link[=rbind.integer64]{rbind}}. 
+  \cr
+  Different from Base R, our operators \code{\link[=+.integer64]{+}}, 
+  \code{\link[=-.integer64]{-}}, \code{\link[=\%/\%.integer64]{\%/\%}} and \code{\link[=\%\%.integer64]{\%\%}} coerce their arguments to 
+  \code{integer64} and always return \code{integer64}. 
+  \cr
+  The multiplication operator \code{\link[=*.integer64]{*}} coerces its first argument to \code{integer64} 
+  but allows its second argument to be also \code{double}: the second argument is internally coerced to 'long double' 
+  and the result of the multiplication is returned as \code{integer64}. 
+  \cr
+  The division \code{\link[=/.integer64]{/}} and power \code{\link[=^.integer64]{^}} operators also coerce their first argument to \code{integer64} 
+  and coerce internally their second argument to 'long double', they return as \code{double}, like \code{\link[=sqrt.integer64]{sqrt}}, 
+  \code{\link[=log.integer64]{log}}, \code{\link[=log2.integer64]{log2}} and \code{\link[=log10.integer64]{log10}} do. 
+
+ \tabular{ccccccccc}{
+  \bold{argument1} \tab \bold{op} \tab \bold{argument2} \tab \bold{->} \tab \bold{coerced1} \tab \bold{op} \tab \bold{coerced2} \tab \bold{->} \tab \bold{result} \cr
+  integer64 \tab + \tab double \tab -> \tab integer64 \tab + \tab integer64 \tab -> \tab integer64 \cr
+  double \tab + \tab integer64 \tab -> \tab integer64 \tab + \tab integer64 \tab -> \tab integer64 \cr
+  integer64 \tab - \tab double \tab -> \tab integer64 \tab - \tab integer64 \tab -> \tab integer64 \cr
+  double \tab - \tab integer64 \tab -> \tab integer64 \tab - \tab integer64 \tab -> \tab integer64 \cr
+  integer64 \tab \%/\% \tab double \tab -> \tab integer64 \tab \%/\% \tab integer64 \tab -> \tab integer64 \cr
+  double \tab \%/\% \tab integer64 \tab -> \tab integer64 \tab \%/\% \tab integer64 \tab -> \tab integer64 \cr
+  integer64 \tab \%\% \tab double \tab -> \tab integer64 \tab \%\% \tab integer64 \tab -> \tab integer64 \cr
+  double \tab \%\% \tab integer64 \tab -> \tab integer64 \tab \%\% \tab integer64 \tab -> \tab integer64 \cr
+  integer64 \tab * \tab double \tab -> \tab integer64 \tab * \tab long double \tab -> \tab integer64 \cr
+  double \tab * \tab integer64 \tab -> \tab integer64 \tab * \tab integer64 \tab -> \tab integer64 \cr
+  integer64 \tab / \tab double \tab -> \tab integer64 \tab / \tab long double \tab -> \tab double \cr
+  double \tab / \tab integer64 \tab -> \tab integer64 \tab / \tab long double \tab -> \tab double \cr
+  integer64 \tab ^ \tab double \tab -> \tab integer64 \tab / \tab long double \tab -> \tab double \cr
+  double \tab ^ \tab integer64 \tab -> \tab integer64 \tab / \tab long double \tab -> \tab double \cr
+ }
+}
+\section{Creating and testing S3 class 'integer64'}{
+  Our creator function \code{integer64} takes an argument \code{length}, creates an atomic double vector of this length,
+  attaches an S3 class attribute 'integer64' to it, and that's it. We simply rely on S3 method dispatch and interpret those 
+  64bit elements as 'long long int'. 
+  \cr
+ \code{\link{is.double}} currently returns TRUE for \code{integer64} and might return FALSE in a later release.
+ Consider \code{is.double} to have undefined behaviour and do query \code{is.integer64} \emph{before} querying \code{is.double}.
+%As a second line of defense against misinterpretation we make \code{\link{is.double}}
+%return \code{FALSE} by making it S3 generic and adding a method \code{\link{as.double.integer64}}. 
+  The methods \code{\link{is.integer64}} and \code{\link{is.vector}} both return \code{TRUE} for \code{integer64}. 
+ Note that we did not patch \code{\link{storage.mode}} and \code{\link{typeof}}, which both continue returning 'double'. 
+ Like for 32 bit \code{\link{integer}}, \code{\link{mode}} returns 'numeric' and \code{\link{as.double}} tries coercing to \code{\link{double}}.
+ It is likely that 'integer64' becomes a \code{\link[ff]{vmode}} in package \code{\link[ff]{ff}}. 
+ \cr
+ Further methods for creating \code{integer64} are \code{\link[=range.integer64]{range}} which returns the range of the data type if called without arguments,
+ \code{\link[=rep.integer64]{rep}}, \code{\link[=seq.integer64]{seq}}. 
+ \cr
+ For all available methods on \code{integer64} vectors see the index below and the examples.
+}
+\section{Index of implemented methods}{
+\tabular{rrl}{
+   \bold{creating,testing,printing} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{NA_integer64_} \tab \code{\link{NA_integer_}} \tab NA constant \cr
+   \code{integer64} \tab \code{\link{integer}} \tab create zero atomic vector \cr
+   \code{\link{rep.integer64}} \tab \code{\link{rep}} \tab  \cr
+   \code{\link{seq.integer64}} \tab \code{\link{seq}} \tab  \cr
+   \code{\link{is.integer64}} \tab \code{\link{is}} \tab  \cr
+                                     \tab \code{\link{is.integer}} \tab inherited from Base R \cr
+   %\code{\link{is.double.integer64}} \tab \code{\link{is.double}} \tab  \cr
+   \code{\link{is.vector.integer64}} \tab \code{\link{is.vector}} \tab  \cr
+   \code{\link{identical.integer64}} \tab \code{\link{identical}} \tab  \cr
+   \code{\link{length<-.integer64}} \tab \code{\link{length<-}} \tab  \cr
+                                     \tab \code{\link{length}} \tab inherited from Base R \cr
+                                     \tab \code{\link{names<-}} \tab inherited from Base R \cr
+                                     \tab \code{\link{names}} \tab inherited from Base R \cr
+                                     \tab \code{\link{dim<-}} \tab inherited from Base R \cr
+                                     \tab \code{\link{dim}} \tab inherited from Base R \cr
+                                     \tab \code{\link{dimnames<-}} \tab inherited from Base R \cr
+                                     \tab \code{\link{dimnames}} \tab inherited from Base R \cr
+                                    \tab \code{\link{str}} \tab inherited from Base R, does not print values correctly \cr
+   \code{\link{print.integer64}} \tab \code{\link{print}} \tab  \cr
+   \code{\link{str.integer64}} \tab \code{\link{str}} \tab  \cr
+ \cr
+   \bold{coercing to integer64} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{\link{as.integer64}} \tab   \tab generic \cr
+   \code{\link{as.integer64.character}} \tab \code{\link{character}} \tab  \cr
+   \code{\link{as.integer64.double}} \tab \code{\link{double}} \tab  \cr
+   \code{\link{as.integer64.integer}} \tab \code{\link{integer}} \tab  \cr
+   \code{\link{as.integer64.integer64}} \tab \code{integer64} \tab  \cr
+   \code{\link{as.integer64.logical}} \tab \code{\link{logical}} \tab  \cr
+   \code{\link{as.integer64.NULL}} \tab \code{\link{NULL}} \tab  \cr
+ \cr
+   \bold{coercing from integer64} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{\link{as.bitstring}} \tab \code{\link{as.bitstring}} \tab generic \cr
+   \code{\link{as.bitstring.integer64}} \tab  \tab  \cr
+   \code{\link{as.character.integer64}} \tab \code{\link{as.character}} \tab  \cr
+   \code{\link{as.double.integer64}} \tab \code{\link{as.double}} \tab  \cr
+   \code{\link{as.integer.integer64}} \tab \code{\link{as.integer}} \tab  \cr
+   \code{\link{as.logical.integer64}} \tab \code{\link{as.logical}} \tab  \cr
+   %as.vector.integer64 removed as requested by the CRAN maintainer \code{\link{as.vector.integer64}} \tab \code{\link{as.vector}} \tab  \cr
+ \cr
+   \bold{data structures} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{\link{c.integer64}} \tab \code{\link{c}} \tab vector concatenate \cr
+   \code{\link{cbind.integer64}} \tab \code{\link{cbind}} \tab column bind \cr
+   \code{\link{rbind.integer64}} \tab \code{\link{rbind}} \tab row bind \cr
+   \code{\link{as.data.frame.integer64}} \tab \code{\link{as.data.frame}} \tab coerce atomic object to data.frame \cr
+                                         \tab \code{\link{data.frame}} \tab inherited from Base R since we have coercion \cr
+ \cr
+   \bold{subscripting} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{\link{[.integer64}} \tab \code{\link{[}} \tab vector and array extract \cr
+   \code{\link{[<-.integer64}} \tab \code{\link{[<-}} \tab vector and array assign \cr
+   \code{\link{[[.integer64}} \tab \code{\link{[[}} \tab scalar extract \cr
+   \code{\link{[[<-.integer64}} \tab \code{\link{[[<-}} \tab scalar assign \cr
+ \cr
+   \bold{binary operators} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{\link{+.integer64}} \tab \code{\link{+}} \tab returns integer64 \cr
+   \code{\link{-.integer64}} \tab \code{\link{-}} \tab returns integer64 \cr
+   \code{\link{*.integer64}} \tab \code{\link{*}} \tab returns integer64 \cr
+   \code{\link{^.integer64}} \tab \code{\link{^}} \tab returns double \cr
+   \code{\link{/.integer64}} \tab \code{\link{/}} \tab returns double \cr
+   \code{\link{\%/\%.integer64}} \tab \code{\link{\%/\%}} \tab returns integer64 \cr
+   \code{\link{\%\%.integer64}} \tab \code{\link{\%\%}} \tab returns integer64 \cr
+ \cr
+   \bold{comparison operators} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{\link{==.integer64}} \tab \code{\link{==}} \tab  \cr
+   \code{\link{!=.integer64}} \tab \code{\link{!=}} \tab  \cr
+   \code{\link{<.integer64}} \tab \code{\link{<}} \tab  \cr
+   \code{\link{<=.integer64}} \tab \code{\link{<=}} \tab  \cr
+   \code{\link{>.integer64}} \tab \code{\link{>}} \tab  \cr
+   \code{\link{>=.integer64}} \tab \code{\link{>=}} \tab  \cr
+ \cr
+   \bold{logical operators} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{\link{!.integer64}} \tab \code{\link{!}} \tab  \cr
+   \code{\link{&.integer64}} \tab \code{\link{&}} \tab  \cr
+   \code{\link{|.integer64}} \tab \code{\link{|}} \tab  \cr
+   \code{\link{xor.integer64}} \tab \code{\link{xor}} \tab  \cr
+ \cr
+   \bold{math functions} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{\link{is.na.integer64}} \tab \code{\link{is.na}} \tab returns logical \cr
+   \code{\link{format.integer64}} \tab \code{\link{format}} \tab returns character \cr
+   \code{\link{abs.integer64}} \tab \code{\link{abs}} \tab returns integer64 \cr
+   \code{\link{sign.integer64}} \tab \code{\link{sign}} \tab returns integer64 \cr
+   \code{\link{log.integer64}} \tab \code{\link{log}} \tab returns double \cr
+   \code{\link{log10.integer64}} \tab \code{\link{log10}} \tab  returns double \cr
+   \code{\link{log2.integer64}} \tab \code{\link{log2}} \tab  returns double \cr
+   \code{\link{sqrt.integer64}} \tab \code{\link{sqrt}} \tab  returns double \cr
+   \code{\link{ceiling.integer64}} \tab \code{\link{ceiling}} \tab dummy returning its argument \cr
+   \code{\link{floor.integer64}} \tab \code{\link{floor}} \tab dummy returning its argument \cr
+   \code{\link{trunc.integer64}} \tab \code{\link{trunc}} \tab dummy returning its argument \cr
+   \code{\link{round.integer64}} \tab \code{\link{round}} \tab dummy returning its argument \cr
+   \code{\link{signif.integer64}} \tab \code{\link{signif}} \tab dummy returning its argument \cr
+ \cr
+   \bold{cumulative functions} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{\link{cummin.integer64}} \tab \code{\link{cummin}} \tab \cr
+   \code{\link{cummax.integer64}} \tab \code{\link{cummax}} \tab \cr
+   \code{\link{cumsum.integer64}} \tab \code{\link{cumsum}} \tab \cr
+   \code{\link{cumprod.integer64}} \tab \code{\link{cumprod}} \tab \cr
+   \code{\link{diff.integer64}} \tab \code{\link{diff}} \tab \cr
+ \cr
+   \bold{summary functions} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{\link{range.integer64}} \tab \code{\link{range}} \tab \cr
+   \code{\link{min.integer64}} \tab \code{\link{min}} \tab  \cr
+   \code{\link{max.integer64}} \tab \code{\link{max}} \tab  \cr
+   \code{\link{sum.integer64}} \tab \code{\link{sum}} \tab  \cr
+   \code{\link{mean.integer64}} \tab \code{\link{mean}} \tab  \cr
+   \code{\link{prod.integer64}} \tab \code{\link{prod}} \tab  \cr
+   \code{\link{all.integer64}} \tab \code{\link{all}} \tab  \cr
+   \code{\link{any.integer64}} \tab \code{\link{any}} \tab  \cr
+ \cr
+   \bold{algorithmically complex functions} \tab \bold{see also}          \tab \bold{description (caching)}  \cr
+   \code{\link{match.integer64}} \tab \code{\link{match}} \tab position of x in table (h//o/so) \cr
+   \code{\link{\%in\%.integer64}} \tab \code{\link{\%in\%}} \tab is x in table? (h//o/so) \cr
+   \code{\link{duplicated.integer64}} \tab \code{\link{duplicated}} \tab is current element duplicate of previous one? (h//o/so) \cr
+   \code{\link{unique.integer64}} \tab \code{\link{unique}} \tab (shorter) vector of unique values only (h/s/o/so) \cr
+   \code{\link{unipos.integer64}} \tab \code{\link{unipos}} \tab positions corresponding to unique values (h/s/o/so) \cr
+   \code{\link{tiepos.integer64}} \tab \code{\link{tiepos}} \tab positions of values that are tied (//o/so) \cr
+   \code{\link{keypos.integer64}} \tab \code{\link{keypos}} \tab position of current value in sorted list of unique values (//o/so) \cr
+   \code{\link{as.factor.integer64}} \tab \code{\link{as.factor}} \tab convert to (unordered) factor with sorted levels of previous values (//o/so) \cr
+   \code{\link{as.ordered.integer64}} \tab \code{\link{as.ordered}} \tab convert to ordered factor with sorted levels of previous values (//o/so) \cr
+   \code{\link{table.integer64}} \tab \code{\link{table}} \tab unique values and their frequencies (h/s/o/so) \cr
+   \code{\link{sort.integer64}} \tab \code{\link{sort}} \tab sorted vector (/s/o/so) \cr
+   \code{\link{order.integer64}} \tab \code{\link{order}} \tab positions of elements that would create sorted vector (//o/so) \cr
+   \code{\link{rank.integer64}} \tab \code{\link{rank}} \tab (average) ranks of non-NAs, NAs kept in place (/s/o/so) \cr
+   \code{\link{quantile.integer64}} \tab \code{\link{quantile}} \tab (existing) values at specified percentiles (/s/o/so) \cr
+   \code{\link{median.integer64}} \tab \code{\link{median}} \tab (existing) value at percentile 0.5 (/s/o/so) \cr
+   \code{\link{summary.integer64}} \tab \code{\link{summary}} \tab  (/s/o/so) \cr
+ \cr
+   \bold{helper functions} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{\link{minusclass}} \tab \code{\link{minusclass}} \tab removing class attribute \cr
+   \code{\link{plusclass}} \tab \code{\link{plusclass}} \tab inserting class attribute \cr
+   \code{\link{binattr}} \tab \code{\link{binattr}} \tab define binary op behaviour \cr
+ \cr
+   \bold{tested I/O functions} \tab \bold{see also}          \tab \bold{description} \cr
+                               \tab \code{\link{read.table}} \tab inherited from Base R \cr
+                               \tab \code{\link{write.table}} \tab inherited from Base R \cr
+                               \tab \code{\link{serialize}} \tab inherited from Base R \cr
+                               \tab \code{\link{unserialize}} \tab inherited from Base R \cr
+                               \tab \code{\link{save}} \tab inherited from Base R \cr
+                               \tab \code{\link{load}} \tab inherited from Base R \cr
+                               \tab \code{\link{dput}} \tab inherited from Base R \cr
+                               \tab \code{\link{dget}} \tab inherited from Base R \cr
+}
+}
+\section{Limitations inherited from implementing 64 bit integers via an external package}{
+  \itemize{
+    \item \bold{vector size} of atomic vectors is still limited to \code{\link{.Machine}$integer.max}. 
+    However, external memory extending packages such as \code{\link[ff]{ff}} or \code{bigmemory} 
+    can extend their address space now with \code{integer64}. Having 64 bit integers also helps 
+    with those not so obvious address issues that arise once we exchange data with SQL databases 
+    and datawarehouses, which use big integers as surrogate keys, e.g. on indexed primary key columns.
+    This puts R into a relatively strong position compared to certain commercial statistical 
+    softwares, which sell database connectivity but neither have the range of 64 bit integers, 
+    nor have integers at all, nor have a single numeric data type in their macro-glue-language.
+
+    \item \bold{literals} such as \code{123LL} would require changes to Base R, up to then we need to write (and call) 
+    \code{as.integer64(123L)} or \code{as.integer64(123)} or \code{as.integer64('123')}. 
+    Only the latter allows to specify numbers beyond Base R's numeric data types and therefore is the recommended
+    way to use -- using only one way may facilitate migrating code to literals at a later stage.
+
+  }
+}
+\section{Limitations inherited from Base R, Core team, can you change this?}{
+  \itemize{
+    \item \bold{\code{\link{identical}}} with default parameters does not distinguish all bit-patterns of doubles. 
+    For testing purposes we provide a wrapper \code{\link{identical.integer64}} that will distinguish all bit-patterns.
+    It would be desirable to have a single call of \code{\link{identical}} handle both, \code{\link{double}} and \code{integer64}.
+
+    \item the \bold{colon} operator \code{\link{:}} officially does not dispatch S3 methods, however, we have made it generic
+     \preformatted{
+     from <- lim.integer64()[1]
+     to <- from+99
+     from:to
+   }
+   A limitation remains: it will only dispatch at its first argument \code{from} but not at its second \code{to}.
+
+    \item \bold{\code{\link{is.double}}} does not dispatch S3 methods. However, we have made it generic 
+		and it will return \code{FALSE} on \code{integer64}.
+
+    \item \bold{\code{\link{c}}} only dispatches \code{\link{c.integer64}} if the first argument is \code{integer64}
+    and it does not recursively dispatch the proper method when called with argument \code{recursive=TRUE}
+    Therefore \preformatted{
+      c(list(integer64,integer64))
+    }
+     does not work and for now you can only call \preformatted{
+       c.integer64(list(x,x))
+     }
+
+    \item \bold{generic binary operators} fail to dispatch *any* user-defined S3 method 
+    if the two arguments have two different S3 classes. For example we have two classes 
+    \code{\link{bit}} and \code{\link{bitwhich}} sparsely representing boolean vectors 
+    and we have methods \code{\link{&.bit}} and \code{\link{&.bitwhich}}. For an expression
+    involving both as in \code{ bit & bitwhich}, none of the two methods is dispatched. 
+    Instead a standard method is dispatched, which neither handles \code{\link{bit}} 
+    nor \code{\link{bitwhich}}. Although it lacks symmetry, the better choice would be to 
+    dispatch simply the method of the class of the first argument in case of class conflict. 
+    This choice would allow authors of extension packages providing coherent behaviour 
+    at least within their contributed classes. But as long as none of the package authors 
+    methods is dispatched, he cannot handle the conflicting classes at all.
+
+    \item \bold{\code{\link{unlist}}} is not generic and if it were, we would face similar problems as with \code{c()}
+
+    \item \bold{\code{\link{vector}}} with argument \code{mode='integer64'} cannot work without adjustment of Base R
+    \item \bold{\code{\link{as.vector}}} with argument \code{mode='integer64'} cannot work without adjustment of Base R
+
+    \item \bold{\code{\link{is.vector}}} does not dispatch its method \code{\link{is.vector.integer64}}
+
+    \item \bold{\code{\link{mode<-}}} drops the class 'integer64' which is returned from \code{as.integer64}.
+       Also it does not remove an existing class 'integer64' when assigning mode 'integer'. 
+
+    \item \bold{\code{\link{storage.mode<-}}} does not support external data types such as \code{as.integer64}
+
+    \item \bold{\code{\link{matrix}}} does drop the 'integer64' class attribute.
+
+    \item \bold{\code{\link{array}}}  does drop the 'integer64' class attribute. 
+           In current R versions (1.15.1) this can be circumvented by activating the function 
+						\code{as.vector.integer64} further down this file.
+						However, the CRAN maintainer has requested to remove \code{as.vector.integer64}, 
+						even at the price of breaking previously working functionality of the package. 
+
+    \item \bold{\code{\link{str}}} does not print the values of \code{integer64} correctly
+
+  }
+}
+\section{further limitations}{
+  \itemize{
+    \item \bold{subscripting} non-existing elements and subscripting with \code{NA}s is currently not supported. 
+    Such subscripting currently returns \code{9218868437227407266} instead of \code{NA} (the \code{NA} value of the underlying double code).
+    Following the full R behaviour here would either destroy performance or require extensive C-coding. 
+  }
+}
+\note{
+   \code{integer64} are useful for handling database keys and exact counting in +-2^63.
+   Do not use them as replacement for 32bit integers, integer64 are not
+   supported for subscripting by R-core and they have different semantics 
+   when combined with double. Do understand that \code{integer64} can only be
+   useful over \code{double} if we do not coerce it to \code{double}. \cr
+  \cr
+  While \cr
+  integer + double -> double + double -> double \cr
+  or \cr
+  1L + 0.5 -> 1.5 \cr 
+  for additive operations we coerce to \code{integer64} \cr
+  integer64 + double ->  integer64 + integer64 -> integer64 \cr
+  hence \cr
+  as.integer64(1) + 0.5 -> 1LL + 0LL -> 1LL \cr
+  \cr
+  see section "Arithmetic precision and coercion" above
+}
+\value{
+  \code{integer64} returns a vector of 'integer64', 
+   i.e. a vector of \code{\link{double}} decorated with class 'integer64'.
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+Maintainer: Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ package }
+\keyword{ classes }
+\keyword{ manip }
+\seealso{ \code{\link{integer}} in base R }
+\examples{
+message("Using integer64 in vector")
+x <- integer64(8)    # create 64 bit vector
+x
+is.atomic(x)         # TRUE
+is.integer64(x)      # TRUE
+is.numeric(x)        # TRUE
+is.integer(x)        # FALSE - debatable
+is.double(x)         # FALSE - might change
+x[] <- 1:2           # assigned value is recycled as usual
+x[1:6]               # subscripting as usual
+length(x) <- 13      # changing length as usual
+x
+rep(x, 2)            # replicate as usual
+seq(as.integer64(1), 10)     # seq.integer64 is dispatched on first given argument
+seq(to=as.integer64(10), 1)  # seq.integer64 is dispatched on first given argument
+seq.integer64(along.with=x)  # or call seq.integer64 directly
+# c.integer64 is dispatched only if *first* argument is integer64 ...
+x <- c(x,runif(length(x), max=100)) 
+# ... and coerces everything to integer64 - including double
+x                                   
+names(x) <- letters  # use names as usual
+x
+
+message("Using integer64 in array - note that 'matrix' currently does not work")
+message("as.vector.integer64 removed as requested by the CRAN maintainer")
+message("as consequence 'array' also does not work anymore")
+%y <- array(as.integer64(NA), dim=c(3,4), dimnames=list(letters[1:3], LETTERS[1:4]))
+message("we still can create a matrix or array by assigning 'dim'")
+y <- rep(as.integer64(NA), 12)
+dim(y) <- c(3,4)
+dimnames(y) <- list(letters[1:3], LETTERS[1:4])
+y["a",] <- 1:2       # assigning as usual
+y
+y[1:2,-4]            # subscripting as usual
+# cbind.integer64 dispatched on any argument and coerces everything to integer64
+cbind(E=1:3, F=runif(3, 0, 100), G=c("-1","0","1"), y)
+
+message("Using integer64 in data.frame")
+str(as.data.frame(x))
+str(as.data.frame(y))
+str(data.frame(y))
+str(data.frame(I(y)))
+d <- data.frame(x=x, y=runif(length(x), 0, 100))
+d
+d$x
+
+message("Using integer64 with csv files")
+fi64 <- tempfile()
+write.csv(d, file=fi64, row.names=FALSE)
+e <- read.csv(fi64, colClasses=c("integer64", NA))
+unlink(fi64)
+str(e)
+identical.integer64(d$x,e$x)
+
+message("Serializing and unserializing integer64")
+dput(d, fi64)
+e <- dget(fi64)
+identical.integer64(d$x,e$x)
+e <- d[,]
+save(e, file=fi64)
+rm(e)
+load(file=fi64)
+identical.integer64(d,e)
+
+### A couple of unit tests follow hidden in a dontshow{} directive ###
+  \dontshow{
+message("Testing identical.integer64")
+i64 <- as.double(NA); class(i64) <- "integer64"
+stopifnot(identical(unclass(i64-1), unclass(i64+1)))
+stopifnot(identical(i64-1, i64+1))
+stopifnot(!identical.integer64(i64-1, i64+1))
+
+message("Testing dispatch of 'c' method")
+stopifnot(identical.integer64(c(integer64(0), NA), as.integer64(NA)))
+message("Dispatch on the second argument fails and we want to be notified once that changes")
+stopifnot(!identical.integer64(c(NA, integer64(0)), as.integer64(NA)))
+
+message("Testing minus and plus")
+d64 <- c(-.Machine$double.base^.Machine$double.digits, -.Machine$integer.max, -1, 0, 1, .Machine$integer.max, .Machine$double.base^.Machine$double.digits)
+i64 <- as.integer64(d64)
+stopifnot(identical.integer64(i64-1+1,i64))
+stopifnot(identical.integer64(i64+1-1,i64))
+
+message("Testing minus and plus edge cases and 'rev'\n")
+stopifnot(identical.integer64(lim.integer64()+1-1, c(lim.integer64()[1], NA)))
+stopifnot(identical.integer64(rev(lim.integer64())-1+1, c(lim.integer64()[2], NA)))
+
+message("Testing 'range.integer64', multiplication and integer division")
+i64 <- integer64(63)
+i64[1] <- 1
+for (i in 2:63)
+	i64[i] <- 2*i64[i-1]
+stopifnot(identical.integer64(i64 * rev(i64), rep(i64[63], 63)))
+for (i in 63:2)
+	i64[i-1] <- i64[i]\%/\%2
+stopifnot(identical.integer64(i64 * rev(i64), rep(i64[63], 63)))
+for (i in 63:2)
+	i64[i-1] <- i64[i]/2
+stopifnot(identical.integer64(i64 * rev(i64), rep(i64[63], 63)))
+stopifnot(identical.integer64(c( -i64[63] - (i64[63]-1), i64[63]+(i64[63]-1) ), lim.integer64()))
+
+stopifnot(identical.integer64(i64[-1]\%/\%2*as.integer64(2), i64[-1]))
+stopifnot(identical.integer64(i64[-1]\%/\%2L*as.integer64(2), i64[-1]))
+stopifnot(identical.integer64(i64[-1]/2*as.integer64(2), i64[-1]))
+stopifnot(identical.integer64(i64[-1]/2*as.integer64(2), i64[-1]))
+
+stopifnot(identical.integer64(i64[-63]*2\%/\%2, i64[-63]))
+stopifnot(identical.integer64(i64[-63]*2L\%/\%2L, i64[-63]))
+stopifnot(identical.integer64(as.integer64(i64[-63]*2/2), i64[-63]))
+stopifnot(identical.integer64(as.integer64(i64[-63]*2L/2L), i64[-63]))
+
+message("Testing sqrt, power and log")
+stopifnot(identical.integer64( as.integer64(sqrt(i64[-1][c(FALSE, TRUE)])*sqrt(i64[-1][c(FALSE, TRUE)])), i64[-1][c(FALSE, TRUE)] ))
+
+stopifnot(identical.integer64(as.integer64(2)^(0:62), i64))
+stopifnot(identical.integer64(as.integer64(0:62), as.integer64(round(log2(i64)))))
+stopifnot(identical.integer64(as.integer64(round(log(as.integer64(2)^(0:62), 2))), as.integer64(0:62)))
+stopifnot(identical.integer64(as.integer64(round(log(as.integer64(3)^(0:39), 3))), as.integer64(0:39)))
+stopifnot(identical.integer64(as.integer64(round(log(as.integer64(10)^(0:18), 10))), as.integer64(0:18)))
+stopifnot(identical.integer64(as.integer64(round(log10(as.integer64(10)^(0:18)))), as.integer64(0:18)))
+
+stopifnot(identical.integer64((as.integer64(2)^(1:62))^(1/1:62), as.integer64(rep(2, 62))))
+stopifnot(identical.integer64((as.integer64(3)^(1:39))^(1/1:39), as.integer64(rep(3, 39))))
+stopifnot(identical.integer64((as.integer64(10)^(1:18))^(1/1:18), as.integer64(rep(10, 18))))
+
+message("Testing c and rep")
+stopifnot(identical.integer64( as.integer64(rep(1:3, 1:3)), rep(as.integer64(1:3), 1:3)))
+stopifnot(identical.integer64( as.integer64(rep(1:3, 3)), rep(as.integer64(1:3), 3)))
+ 
+x <- as.double(c(NA,NA,NA))
+class(x) <- "integer64"
+x <- x + -1:1
+stopifnot(identical.integer64(rep(x, 3), c(x,x,x) ))
+stopifnot(identical.integer64(c.integer64(list(x,x,x), recursive=TRUE), c(x,x,x) ))
+
+message("Testing seq")
+stopifnot(identical.integer64(seq(as.integer64(1), 10, 2), as.integer64(seq(1, 10, 2)) ))
+stopifnot(identical.integer64(seq(as.integer64(1), by=2, length.out=5), as.integer64(seq(1, by=2, length.out=5)) ))
+stopifnot(identical.integer64(seq(as.integer64(1), by=2, length.out=6), as.integer64(seq(1, by=2, length.out=6)) ))
+stopifnot(identical.integer64(seq.integer64(along.with=3:5), as.integer64(seq(along.with=3:5)) ))
+stopifnot(identical.integer64(seq(as.integer64(1), to=-9), as.integer64(seq(1, to=-9)) ))
+
+message("Testing cbind and rbind")
+stopifnot(identical.integer64( cbind(as.integer64(1:3), 1:3), {x <- rep(as.integer64(1:3), 2); dim(x)<-c(3,2);x}))
+stopifnot(identical.integer64( rbind(as.integer64(1:3), 1:3), t({x <- rep(as.integer64(1:3), 2); dim(x)<-c(3,2);x})))
+
+message("Testing coercion")
+stopifnot(identical( as.double(as.integer64(c(NA, seq(0, 9, 0.25)))), as.double(as.integer(c(NA, seq(0, 9, 0.25))))))
+stopifnot(identical( as.character(as.integer64(c(NA, seq(0, 9, 0.25)))), as.character(as.integer(c(NA, seq(0, 9, 0.25))))))
+stopifnot(identical( as.integer(as.integer64(c(NA, seq(0, 9, 0.25)))), as.integer(c(NA, seq(0, 9, 0.25)))))
+stopifnot(identical( as.logical(as.integer64(c(NA, seq(0, 9, 0.25)))), as.logical(as.integer(c(NA, seq(0, 9, 0.25))))))
+stopifnot(identical( as.integer(as.integer64(c(NA, FALSE, TRUE))), as.integer(c(NA, FALSE, TRUE))))
+stopifnot(identical( as.integer64(as.integer(as.integer64(-9:9))), as.integer64(-9:9)))
+stopifnot(identical( as.integer64(as.double(as.integer64(-9:9))), as.integer64(-9:9)))
+stopifnot(identical( as.integer64(as.character(as.integer64(-9:9))), as.integer64(-9:9)))
+stopifnot(identical( as.integer64(as.character(lim.integer64())), lim.integer64()))
+
+message("-- testing logical operators --")
+stopifnot(identical.integer64(!c(NA, -1:1), !c(as.integer64(NA), -1:1)))
+stopifnot(identical.integer64(rep(c(NA, -1:1), 4)&rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))&as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+stopifnot(identical.integer64(rep(c(NA, -1:1), 4)|rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))|as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+stopifnot(identical.integer64(xor(rep(c(NA, -1:1), 4),rep(c(NA, -1:1), rep(4, 4))), xor(as.integer64(rep(c(NA, -1:1), 4)),as.integer64(rep(c(NA, -1:1), rep(4, 4))))))
+
+message("-- testing comparison operators --")
+stopifnot(identical.integer64(rep(c(NA, -1:1), 4)==rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))==as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+stopifnot(identical.integer64(rep(c(NA, -1:1), 4)!=rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))!=as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+stopifnot(identical.integer64(rep(c(NA, -1:1), 4)>rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))>as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+stopifnot(identical.integer64(rep(c(NA, -1:1), 4)>=rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))>=as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+stopifnot(identical.integer64(rep(c(NA, -1:1), 4)<rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))<as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+stopifnot(identical.integer64(rep(c(NA, -1:1), 4)<=rep(c(NA, -1:1), rep(4, 4)), as.integer64(rep(c(NA, -1:1), 4))<=as.integer64(rep(c(NA, -1:1), rep(4, 4)))))
+
+message("-- testing vector functions --")
+stopifnot(identical.integer64( is.na(as.integer64(c(NA, -1:1))), is.na(c(NA, -1:1)) ))
+stopifnot(identical.integer64( format(as.integer64(c(NA, -1:1))), format(c(NA, -1:1)) ))
+stopifnot(identical.integer64( abs(as.integer64(c(NA, -1:1))), as.integer64(abs(c(NA, -1:1))) ))
+stopifnot(identical.integer64( sign(as.integer64(c(NA, -1:1))), as.integer64(sign(c(NA, -1:1))) ))
+stopifnot(identical.integer64( ceiling(as.integer64(c(NA, -1:1))), as.integer64(ceiling(c(NA, -1:1))) ))
+stopifnot(identical.integer64( floor(as.integer64(c(NA, -1:1))), as.integer64(floor(c(NA, -1:1))) ))
+stopifnot(identical.integer64( trunc(as.integer64(c(NA, -1:1))), as.integer64(trunc(c(NA, -1:1))) ))
+stopifnot(identical.integer64( signif(as.integer64(c(NA, -1:1))), as.integer64(c(NA, -1:1)) ))
+
+message("Testing summary functions")
+stopifnot(identical(all(as.integer(1)), all(as.integer64(1))))
+stopifnot(identical(all(as.integer(0)), all(as.integer64(0))))
+stopifnot(identical(all(as.integer(NA)), all(as.integer64(NA))))
+stopifnot(identical(all(as.integer(NA), na.rm=TRUE), all(as.integer64(NA), na.rm=TRUE)))
+stopifnot(identical(all(as.integer(1), NA), all(as.integer64(1), NA)))
+stopifnot(identical(all(as.integer(0), NA), all(as.integer64(0), NA)))
+stopifnot(identical(all(as.integer(1), NA, na.rm=TRUE), all(as.integer64(1), NA, na.rm=TRUE)))
+stopifnot(identical(all(as.integer(0), NA, na.rm=TRUE), all(as.integer64(0), NA, na.rm=TRUE)))
+stopifnot(identical(all(as.integer(c(1, NA))), all(as.integer64(c(1, NA)))))
+stopifnot(identical(all(as.integer(c(0, NA))), all(as.integer64(c(0, NA)))))
+stopifnot(identical(all(as.integer(c(1, NA)), na.rm=TRUE), all(as.integer64(c(1, NA)), na.rm=TRUE)))
+stopifnot(identical(all(as.integer(c(0, NA)), na.rm=TRUE), all(as.integer64(c(0, NA)), na.rm=TRUE)))
+
+stopifnot(identical(any(as.integer(1)), any(as.integer64(1))))
+stopifnot(identical(any(as.integer(0)), any(as.integer64(0))))
+stopifnot(identical(any(as.integer(NA)), any(as.integer64(NA))))
+stopifnot(identical(any(as.integer(NA), na.rm=TRUE), any(as.integer64(NA), na.rm=TRUE)))
+stopifnot(identical(any(as.integer(1), NA), any(as.integer64(1), NA)))
+stopifnot(identical(any(as.integer(0), NA), any(as.integer64(0), NA)))
+stopifnot(identical(any(as.integer(1), NA, na.rm=TRUE), any(as.integer64(1), NA, na.rm=TRUE)))
+stopifnot(identical(any(as.integer(0), NA, na.rm=TRUE), any(as.integer64(0), NA, na.rm=TRUE)))
+stopifnot(identical(any(as.integer(c(1, NA))), any(as.integer64(c(1, NA)))))
+stopifnot(identical(any(as.integer(c(0, NA))), any(as.integer64(c(0, NA)))))
+stopifnot(identical(any(as.integer(c(1, NA)), na.rm=TRUE), any(as.integer64(c(1, NA)), na.rm=TRUE)))
+stopifnot(identical(any(as.integer(c(0, NA)), na.rm=TRUE), any(as.integer64(c(0, NA)), na.rm=TRUE)))
+
+stopifnot(identical.integer64(as.integer64(sum(c(2, 3, NA))), sum(as.integer64(c(2, 3, NA)))))
+stopifnot(identical.integer64(as.integer64(sum(c(2, 3, NA), na.rm=TRUE)), sum(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(sum(c(2, 3, NA))), sum(as.integer64(c(2, 3, NA)))))
+stopifnot(identical.integer64(as.integer64(sum(c(2, 3, NA), na.rm=TRUE)), sum(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(sum(2, 3, NA)), sum(as.integer64(2), 3, NA)))
+stopifnot(identical.integer64(as.integer64(sum(2, 3, NA, na.rm=TRUE)), sum(as.integer64(2), 3, NA, na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(sum(2, 3, NA)), sum(as.integer64(2), 3, NA)))
+stopifnot(identical.integer64(as.integer64(sum(2, 3, NA, na.rm=TRUE)), sum(as.integer64(2), 3, NA, na.rm=TRUE)))
+
+stopifnot(identical.integer64(as.integer64(prod(c(2, 3, NA))), prod(as.integer64(c(2, 3, NA)))))
+stopifnot(identical.integer64(as.integer64(prod(c(2, 3, NA), na.rm=TRUE)), prod(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(prod(c(2, 3, NA))), prod(as.integer64(c(2, 3, NA)))))
+stopifnot(identical.integer64(as.integer64(prod(c(2, 3, NA), na.rm=TRUE)), prod(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(prod(2, 3, NA)), prod(as.integer64(2), 3, NA)))
+stopifnot(identical.integer64(as.integer64(prod(2, 3, NA, na.rm=TRUE)), prod(as.integer64(2), 3, NA, na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(prod(2, 3, NA)), prod(as.integer64(2), 3, NA)))
+stopifnot(identical.integer64(as.integer64(prod(2, 3, NA, na.rm=TRUE)), prod(as.integer64(2), 3, NA, na.rm=TRUE)))
+
+stopifnot(identical.integer64(as.integer64(min(c(2, 3, NA))), min(as.integer64(c(2, 3, NA)))))
+stopifnot(identical.integer64(as.integer64(min(c(2, 3, NA), na.rm=TRUE)), min(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(min(c(2, 3, NA))), min(as.integer64(c(2, 3, NA)))))
+stopifnot(identical.integer64(as.integer64(min(c(2, 3, NA), na.rm=TRUE)), min(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(min(2, 3, NA)), min(as.integer64(2), 3, NA)))
+stopifnot(identical.integer64(as.integer64(min(2, 3, NA, na.rm=TRUE)), min(as.integer64(2), 3, NA, na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(min(2, 3, NA)), min(as.integer64(2), 3, NA)))
+stopifnot(identical.integer64(as.integer64(min(2, 3, NA, na.rm=TRUE)), min(as.integer64(2), 3, NA, na.rm=TRUE)))
+
+stopifnot(identical.integer64(as.integer64(max(c(2, 3, NA))), max(as.integer64(c(2, 3, NA)))))
+stopifnot(identical.integer64(as.integer64(max(c(2, 3, NA), na.rm=TRUE)), max(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(max(c(2, 3, NA))), max(as.integer64(c(2, 3, NA)))))
+stopifnot(identical.integer64(as.integer64(max(c(2, 3, NA), na.rm=TRUE)), max(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(max(2, 3, NA)), max(as.integer64(2), 3, NA)))
+stopifnot(identical.integer64(as.integer64(max(2, 3, NA, na.rm=TRUE)), max(as.integer64(2), 3, NA, na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(max(2, 3, NA)), max(as.integer64(2), 3, NA)))
+stopifnot(identical.integer64(as.integer64(max(2, 3, NA, na.rm=TRUE)), max(as.integer64(2), 3, NA, na.rm=TRUE)))
+
+stopifnot(identical.integer64(as.integer64(range(c(2, 3, NA))), range(as.integer64(c(2, 3, NA)))))
+stopifnot(identical.integer64(as.integer64(range(c(2, 3, NA), na.rm=TRUE)), range(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(range(c(2, 3, NA))), range(as.integer64(c(2, 3, NA)))))
+stopifnot(identical.integer64(as.integer64(range(c(2, 3, NA), na.rm=TRUE)), range(as.integer64(c(2, 3, NA)), na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(range(2, 3, NA)), range(as.integer64(2), 3, NA)))
+stopifnot(identical.integer64(as.integer64(range(2, 3, NA, na.rm=TRUE)), range(as.integer64(2), 3, NA, na.rm=TRUE)))
+stopifnot(identical.integer64(as.integer64(range(2, 3, NA)), range(as.integer64(2), 3, NA)))
+stopifnot(identical.integer64(as.integer64(range(2, 3, NA, na.rm=TRUE)), range(as.integer64(2), 3, NA, na.rm=TRUE)))
+
+message("-- testing cummulative functions --")
+stopifnot(identical.integer64(as.integer64(cumsum(c(2, 3, NA, 1, 4))), cumsum(as.integer64(c(2, 3, NA, 1, 4)))))
+stopifnot(identical.integer64(as.integer64(cumprod(c(2, 3, NA, 1, 4))), cumprod(as.integer64(c(2, 3, NA, 1, 4)))))
+stopifnot(identical.integer64(as.integer64(cummin(c(2, 3, NA, 1, 4))), cummin(as.integer64(c(2, 3, NA, 1, 4)))))
+stopifnot(identical.integer64(as.integer64(cummax(c(2, 3, NA, 1, 4))), cummax(as.integer64(c(2, 3, NA, 1, 4)))))
+
+message("testing diff")
+d64 <- diffinv(rep(.Machine$integer.max, 100), lag=2, differences=2)
+i64 <- as.integer64(d64)
+identical(diff(d64, lag=2, differences=2), as.double(diff(i64, lag=2, differences=2)))
+
+  }
+
+  \dontrun{
+message("== Differences between integer64 and int64 ==")
+require(bit64)
+require(int64)
+
+message("-- integer64 is atomic --")
+is.atomic(integer64())
+#is.atomic(int64())
+str(integer64(3))
+#str(int64(3))
+
+message("-- The following performance numbers are measured under RWin64  --")
+message("-- under RWin32 the advantage of integer64 over int64 is smaller --")
+
+message("-- integer64 needs 7x/5x less RAM than int64 under 64/32 bit OS 
+(and twice the RAM of integer as it should be) --")
+#as.vector(object.size(int64(1e6))/object.size(integer64(1e6)))
+as.vector(object.size(integer64(1e6))/object.size(integer(1e6)))
+
+message("-- integer64 creates 2000x/1300x faster than int64 under 64/32 bit OS
+(and 3x the time of integer) --")
+t32 <- system.time(integer(1e8))
+t64 <- system.time(integer64(1e8))
+#T64 <- system.time(int64(1e7))*10  # using 1e8 as above stalls our R on an i7 8 GB RAM Thinkpad
+#T64/t64
+t64/t32
+
+i32 <- sample(1e6)
+d64 <- as.double(i32)
+
+message("-- the following timings are rather conservative since timings
+ of integer64 include garbage collection -- due to looped calls")
+message("-- integer64 coerces 900x/100x faster than int64 
+ under 64/32 bit OS (and 2x the time of coercing to integer) --")
+t32 <- system.time(for(i in 1:1000)as.integer(d64))
+t64 <- system.time(for(i in 1:1000)as.integer64(d64))
+#T64 <- system.time(as.int64(d64))*1000
+#T64/t64
+t64/t32
+td64 <- system.time(for(i in 1:1000)as.double(i32))
+t64 <- system.time(for(i in 1:1000)as.integer64(i32))
+#T64 <- system.time(for(i in 1:10)as.int64(i32))*100
+#T64/t64
+t64/td64
+
+message("-- integer64 serializes 4x/0.8x faster than int64 
+ under 64/32 bit OS (and less than 2x/6x the time of integer or double) --")
+t32 <- system.time(for(i in 1:10)serialize(i32, NULL))
+td64 <- system.time(for(i in 1:10)serialize(d64, NULL))
+i64 <- as.integer64(i32); 
+t64 <- system.time(for(i in 1:10)serialize(i64, NULL))
+rm(i64); gc()
+#I64 <- as.int64(i32); 
+#T64 <- system.time(for(i in 1:10)serialize(I64, NULL))
+#rm(I64); gc()
+#T64/t64
+t64/t32
+t64/td64
+
+
+message("-- integer64 adds 250x/60x faster than int64
+ under 64/32 bit OS (and less than 6x the time of integer or double) --")
+td64 <- system.time(for(i in 1:100)d64+d64)
+t32 <- system.time(for(i in 1:100)i32+i32)
+i64 <- as.integer64(i32); 
+t64 <- system.time(for(i in 1:100)i64+i64)
+rm(i64); gc()
+#I64 <- as.int64(i32); 
+#T64 <- system.time(for(i in 1:10)I64+I64)*10
+#rm(I64); gc()
+#T64/t64
+t64/t32
+t64/td64
+
+message("-- integer64 sums 3x/0.2x faster than int64 
+(and at about 5x/60X the time of integer and double) --")
+td64 <- system.time(for(i in 1:100)sum(d64))
+t32 <- system.time(for(i in 1:100)sum(i32))
+i64 <- as.integer64(i32); 
+t64 <- system.time(for(i in 1:100)sum(i64))
+rm(i64); gc()
+#I64 <- as.int64(i32); 
+#T64 <- system.time(for(i in 1:100)sum(I64))
+#rm(I64); gc()
+#T64/t64
+t64/t32
+t64/td64
+
+message("-- integer64 diffs 5x/0.85x faster than integer and double
+(int64 version 1.0 does not support diff) --")
+td64 <- system.time(for(i in 1:10)diff(d64, lag=2L, differences=2L))
+t32 <- system.time(for(i in 1:10)diff(i32, lag=2L, differences=2L))
+i64 <- as.integer64(i32); 
+t64 <- system.time(for(i in 1:10)diff(i64, lag=2L, differences=2L))
+rm(i64); gc()
+t64/t32
+t64/td64
+
+
+message("-- integer64 subscripts 1000x/340x faster than int64
+(and at the same speed / 10x slower as integer) --")
+ts32 <- system.time(for(i in 1:1000)sample(1e6, 1e3))
+t32<- system.time(for(i in 1:1000)i32[sample(1e6, 1e3)])
+i64 <- as.integer64(i32); 
+t64 <- system.time(for(i in 1:1000)i64[sample(1e6, 1e3)])
+rm(i64); gc()
+#I64 <- as.int64(i32); 
+#T64 <- system.time(for(i in 1:100)I64[sample(1e6, 1e3)])*10
+#rm(I64); gc()
+#(T64-ts32)/(t64-ts32)
+(t64-ts32)/(t32-ts32)
+
+message("-- integer64 assigns 200x/90x faster than int64
+(and 50x/160x slower than integer) --")
+ts32 <- system.time(for(i in 1:100)sample(1e6, 1e3))
+t32 <- system.time(for(i in 1:100)i32[sample(1e6, 1e3)] <- 1:1e3)
+i64 <- as.integer64(i32); 
+i64 <- system.time(for(i in 1:100)i64[sample(1e6, 1e3)] <- 1:1e3)
+rm(i64); gc()
+#I64 <- as.int64(i32); 
+#I64 <- system.time(for(i in 1:10)I64[sample(1e6, 1e3)] <- 1:1e3)*10
+#rm(I64); gc()
+#(T64-ts32)/(t64-ts32)
+(t64-ts32)/(t32-ts32)
+
+
+tdfi32 <- system.time(dfi32 <- data.frame(a=i32, b=i32, c=i32))
+tdfsi32 <- system.time(dfi32[1e6:1,])
+fi32 <- tempfile()
+tdfwi32 <- system.time(write.csv(dfi32, file=fi32, row.names=FALSE))
+tdfri32 <- system.time(read.csv(fi32, colClasses=rep("integer", 3)))
+unlink(fi32)
+rm(dfi32); gc()
+
+i64 <- as.integer64(i32); 
+tdfi64 <- system.time(dfi64 <- data.frame(a=i64, b=i64, c=i64))
+tdfsi64 <- system.time(dfi64[1e6:1,])
+fi64 <- tempfile()
+tdfwi64 <- system.time(write.csv(dfi64, file=fi64, row.names=FALSE))
+tdfri64 <- system.time(read.csv(fi64, colClasses=rep("integer64", 3)))
+unlink(fi64)
+rm(i64, dfi64); gc()
+
+#I64 <- as.int64(i32); 
+#tdfI64 <- system.time(dfI64<-data.frame(a=I64, b=I64, c=I64))
+#tdfsI64 <- system.time(dfI64[1e6:1,])
+#fI64 <- tempfile()
+#tdfwI64 <- system.time(write.csv(dfI64, file=fI64, row.names=FALSE))
+#tdfrI64 <- system.time(read.csv(fI64, colClasses=rep("int64", 3)))
+#unlink(fI64)
+#rm(I64, dfI64); gc()
+
+message("-- integer64 coerces 40x/6x faster to data.frame than int64
+(and factor 1/9 slower than integer) --")
+#tdfI64/tdfi64
+tdfi64/tdfi32
+message("-- integer64 subscripts from data.frame 20x/2.5x faster than int64
+ (and 3x/13x slower than integer) --")
+#tdfsI64/tdfsi64
+tdfsi64/tdfsi32
+message("-- integer64 csv writes about 2x/0.5x faster than int64
+(and about 1.5x/5x slower than integer) --")
+#tdfwI64/tdfwi64
+tdfwi64/tdfwi32
+message("-- integer64 csv reads about 3x/1.5 faster than int64
+(and about 2x slower than integer) --")
+#tdfrI64/tdfri64
+tdfri64/tdfri32
+
+rm(i32, d64); gc()
+
+
+message("-- investigating the impact on garbage collection: --")
+message("-- the fragmented structure of int64 messes up R's RAM --")
+message("-- and slows down R's gargbage collection just by existing --")
+
+td32 <- double(21)
+td32[1] <- system.time(d64 <- double(1e7))[3]
+for (i in 2:11)td32[i] <- system.time(gc(), gcFirst=FALSE)[3]
+rm(d64)
+for (i in 12:21)td32[i] <- system.time(gc(), gcFirst=FALSE)[3]
+
+t64 <- double(21)
+t64[1] <- system.time(i64 <- integer64(1e7))[3]
+for (i in 2:11)t64[i] <- system.time(gc(), gcFirst=FALSE)[3]
+rm(i64)
+for (i in 12:21)t64[i] <- system.time(gc(), gcFirst=FALSE)[3]
+
+#T64 <- double(21)
+#T64[1] <- system.time(I64 <- int64(1e7))[3]
+#for (i in 2:11)T64[i] <- system.time(gc(), gcFirst=FALSE)[3]
+#rm(I64)
+#for (i in 12:21)T64[i] <- system.time(gc(), gcFirst=FALSE)[3]
+
+#matplot(1:21, cbind(td32, t64, T64), pch=c("d","i","I"), log="y")
+matplot(1:21, cbind(td32, t64), pch=c("d","i"), log="y")
+  }
+
+}
diff --git a/man/bit64S3.rd b/man/bit64S3.rd
new file mode 100644
index 0000000..9e5511c
--- /dev/null
+++ b/man/bit64S3.rd
@@ -0,0 +1,114 @@
+\name{bit64S3}
+\alias{bit64S3}
+\alias{:}
+\alias{:.default}
+\alias{:.integer64}
+\alias{is.double}
+\alias{is.double.default}
+\alias{is.double.integer64}
+\alias{match}
+\alias{match.default}
+\alias{\%in\%}
+\alias{\%in\%.default}
+\alias{rank}
+\alias{rank.default}
+%\alias{table}
+%\alias{table.default}
+\alias{order}
+\alias{order.default}
+\title{
+  Turning base R functions into S3 generics for bit64 
+}
+\description{
+	Turn those base functions S3 generic which are used in bit64
+}
+\usage{
+	from:to
+ #--as-cran complains about \method{:}{default}(from, to)
+ #--as-cran complains about \method{:}{integer64}(from, to)
+	is.double(x)
+ \method{is.double}{default}(x)
+ \method{is.double}{integer64}(x)
+	match(x, table, ...)
+ \method{match}{default}(x, table, ...)
+	x \%in\% table
+ \method{\%in\%}{default}(x, table)
+	rank(x, ...)
+ \method{rank}{default}(x, ...)
+	%table(...)
+ %\method{table}{default}(...)
+	order(...)
+ \method{order}{default}(...)
+}
+\arguments{
+  \item{x}{
+	integer64 vector: the values to be matched, optionally carrying a cache created with \code{\link{hashcache}}
+}
+  \item{table}{
+	integer64 vector: the values to be matched against, optionally carrying a cache created with \code{\link{hashcache}} or \code{\link{sortordercache}}
+}
+  \item{from}{ scalar denoting first element of sequence }
+  \item{to}{ scalar denoting last element of sequence }
+  \item{\dots}{ ignored }
+}
+\details{
+   The following functions are turned into S3 generics in order to dispatch methods for \code{\link{integer64}}: 
+   \preformatted{
+	   \code{\link{:}}
+	   \code{\link{is.double}}
+	   \code{\link{match}}
+	   \code{\link{\%in\%}}
+	   %\code{\link{table}}
+	   \code{\link{rank}}
+	   \code{\link{order}}
+   }
+}
+\value{
+	\code{\link{invisible}}
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\note{
+	\code{\link{is.double}} returns \code{FALSE} for \code{\link{integer64}} \cr
+	\code{\link{:}} currently only dispatches at its first argument, thus \code{as.integer64(1):9} works but \code{1:as.integer64(9)} doesn't \cr
+	\code{\link{match}} currently only dispatches at its first argument and expects its second argument also to be integer64, otherwise throws an error. Beware of something like \code{match(2, as.integer64(0:3))} \cr
+	\code{\link{\%in\%}} currently only dispatches at its first argument and expects its second argument also to be integer64, otherwise throws an error. Beware of something like \code{2 \%in\% as.integer64(0:3)} \cr
+	\code{\link{order}} currently only orders a single argument, trying more than one raises an error
+}
+\seealso{
+	\code{\link{bit64}}, \code{\link{S3}}
+}
+\examples{
+ is.double(as.integer64(1))
+	as.integer64(1):9
+ match(as.integer64(2), as.integer64(0:3))
+ as.integer64(2) \%in\% as.integer64(0:3)
+ 
+ unique(as.integer64(c(1,1,2)))
+ rank(as.integer64(c(1,1,2)))
+ 
+ %table(as.integer64(c(1,1,2)))
+ %table(as.integer64(c(1,1,2)),as.integer64(c(3,4,4)))
+ %table(as.integer64(c(1,1,2)),c(3,4,4))
+ %table(c(1,1,2),as.integer64(c(3,4,4)))
+ 
+ order(as.integer64(c(1,NA,2)))
+ 
+ \dontshow{
+ stopifnot(identical(match(as.integer64(2), as.integer64(0:3)), match(2, 0:3)))
+ stopifnot(identical(as.integer64(2) \%in\% as.integer64(0:3), 2 \%in\% 0:3))
+ 
+ stopifnot(identical(unique(as.integer64(c(1,1,2))), as.integer64(unique(c(1,1,2)))))
+ stopifnot(identical(rank(as.integer64(c(1,1,2))), rank(c(1,1,2))))
+ 
+ %stopifnot(identical(table(as.integer64(c(1,1,2))), table(c(1,1,2))))
+ %stopifnot(identical(table(as.integer64(c(1,1,2)),as.integer64(c(3,4,4))), table(c(1,1,2),c(3,4,4))))
+ %stopifnot(identical(table(as.integer64(c(1,1,2)),c(3,4,4)), table(c(1,1,2),c(3,4,4))))
+ %stopifnot(identical(table(c(1,1,2),as.integer64(c(3,4,4))), table(c(1,1,2),c(3,4,4))))
+ 
+ stopifnot(identical(order(as.integer64(c(1,NA,2))), order(c(1,NA,2))))
+ stopifnot(identical(order(as.integer64(c(1,NA,2)), decreasing=TRUE), order(c(1,NA,2), decreasing=TRUE)))
+ }
+}
+\keyword{ methods }
diff --git a/man/c.integer64.rd b/man/c.integer64.rd
new file mode 100644
index 0000000..f374126
--- /dev/null
+++ b/man/c.integer64.rd
@@ -0,0 +1,39 @@
+\name{c.integer64}
+\alias{c.integer64}
+\alias{cbind.integer64}
+\alias{rbind.integer64}
+\title{
+   Concatenating integer64 vectors
+}
+\description{
+  The usual functions 'c', 'cbind' and 'rbind'
+}
+\usage{
+\method{c}{integer64}(\dots, recursive = FALSE)
+\method{cbind}{integer64}(\dots)
+\method{rbind}{integer64}(\dots)
+}
+\arguments{
+  \item{\dots}{ two or more arguments coerced to 'integer64' and passed to \code{\link{NextMethod}} }
+  \item{recursive}{ logical. If \code{recursive = TRUE}, the function recursively descends through lists (and pairlists) combining all their elements into a vector. }
+}
+\value{
+  \code{\link{c}} returns an integer64 vector of the total length of the input \cr
+  \code{\link{cbind}} and \code{\link{rbind}} return an integer64 matrix
+}
+\note{
+  R currently only dispatches generic 'c' to method 'c.integer64' if the first argument is 'integer64'
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ classes }
+\keyword{ manip }
+\seealso{ \code{\link{rep.integer64}} \code{\link{seq.integer64}} 
+          \code{\link{as.data.frame.integer64}} \code{\link{integer64}}  
+}
+\examples{
+  c(as.integer64(1), 2:6)
+  cbind(1:6, as.integer(1:6))
+  rbind(1:6, as.integer(1:6))
+}
diff --git a/man/cache.rd b/man/cache.rd
new file mode 100644
index 0000000..167c1cc
--- /dev/null
+++ b/man/cache.rd
@@ -0,0 +1,92 @@
+\name{cache}
+\alias{cache}
+\alias{newcache}
+\alias{jamcache}
+\alias{setcache}
+\alias{getcache}
+\alias{remcache}
+\alias{print.cache}
+\alias{still.identical}
+\title{
+	Atomic Caching
+}
+\description{
+	Functions for caching results attached to atomic objects
+}
+\usage{
+newcache(x)
+jamcache(x)
+cache(x)
+setcache(x, which, value)
+getcache(x, which)
+remcache(x)
+\method{print}{cache}(x, all.names = FALSE, pattern, \dots)
+still.identical(x, y)
+}
+\arguments{
+  \item{x}{
+  an integer64 vector (or a cache object in case of \code{print.cache})
+}
+  \item{y}{
+  an integer64 vector
+}
+  \item{which}{
+  A character naming the object to be retrieved from the cache or to be stored in the cache
+}
+  \item{value}{
+  An object to be stored in the cache 
+}
+  \item{all.names}{
+  passed to \code{\link{ls}} when listing the cache content
+}
+  \item{pattern}{
+  passed to \code{\link{ls}} when listing the cache content
+}
+  \item{\dots}{
+	ignored
+}
+}
+\details{
+	A \code{cache} is an \code{\link{environment}} attached to an atomic object with the \code{\link{attr}} name 'cache'. 
+	It contains at least a reference to the atomic object that carries the cache. 
+	This is used when accessing the cache to detect whether the object carrying the cache has been modified meanwhile.
+	Function \code{still.identical(x,y)} checks whether the objects \code{x} and \code{y} still point to the same memory (i.e. have not been copied or modified meanwhile) \cr
+	Function \code{newcache(x)} creates a new cache referencing  \code{x} \cr
+	Function \code{jamcache(x)} forces \code{x} to have a cache \cr
+	Function \code{cache(x)} returns the cache attached to \code{x} if it is not found to be outdated \cr
+	Function \code{setcache(x, which, value)} assigns a value into the cache of \code{x} \cr
+	Function \code{getcache(x, which)} gets cache value 'which' from \code{x} \cr
+	Function \code{remcache} removes the cache from \code{x} \cr
+}
+\value{
+	see details
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\seealso{
+	Functions that get and set small cache-content automatically when a cache is present: \code{\link{na.count}}, \code{\link{nvalid}}, \code{\link{is.sorted}}, \code{\link{nunique}} and \code{\link{nties}} \cr
+	Setting big caches with a relevant memory footprint requires a conscious decision of the user: \code{\link{hashcache}}, \code{\link{sortcache}}, \code{\link{ordercache}} and \code{\link{sortordercache}} \cr
+	Functions that use big caches: \code{\link{match.integer64}}, \code{\link{\%in\%.integer64}}, \code{\link{duplicated.integer64}}, \code{\link{unique.integer64}}, \code{\link{unipos}}, \code{\link{table.integer64}}, \code{\link{as.factor.integer64}}, \code{\link{as.ordered.integer64}}, \code{\link{keypos}}, \code{\link{tiepos}}, \code{\link{rank.integer64}}, \code{\link{prank}}, \code{\link{qtile}}, \code{\link{quantile.integer64}}, \code{\link{median.integer64}} and \code{\link{summary. [...]
+}
+\examples{
+	x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+	y <- x
+	still.identical(x,y)
+	y[1] <- NA
+	still.identical(x,y)
+	mycache <- newcache(x)
+	ls(mycache)
+	mycache
+	rm(mycache)
+	jamcache(x)
+	cache(x)
+	x[1] <- NA
+	cache(x)
+	getcache(x, "abc")
+	setcache(x, "abc", 1)
+	getcache(x, "abc")
+	remcache(x)
+	cache(x)
+}
+\keyword{ environment }
diff --git a/man/cumsum.integer64.rd b/man/cumsum.integer64.rd
new file mode 100644
index 0000000..8d69603
--- /dev/null
+++ b/man/cumsum.integer64.rd
@@ -0,0 +1,42 @@
+\name{cumsum.integer64}
+\alias{cummin.integer64}
+\alias{cummax.integer64}
+\alias{cumsum.integer64}
+\alias{cumprod.integer64}
+\alias{diff.integer64}
+\title{
+   Cumulative Sums, Products, Extremes and lagged differences
+}
+\description{
+  Cumulative Sums, Products, Extremes and lagged differences
+}
+\usage{
+\method{cummin}{integer64}(x)
+\method{cummax}{integer64}(x)
+\method{cumsum}{integer64}(x)
+\method{cumprod}{integer64}(x)
+\method{diff}{integer64}(x, lag = 1L, differences = 1L, \dots)
+}
+\arguments{
+  \item{x}{ an atomic vector of class 'integer64'}
+  \item{lag}{ see \code{\link{diff}} }
+  \item{differences}{ see \code{\link{diff}} }
+  \item{\dots}{ ignored }
+}
+\value{
+  \code{\link{cummin}}, \code{\link{cummax}} , \code{\link{cumsum}} and \code{\link{cumprod}} 
+     return an integer64 vector of the same length as their input\cr
+  \code{\link{diff}} returns an integer64 vector shorter by \code{lag*differences} elements \cr
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ classes }
+\keyword{ manip }
+\seealso{ \code{\link{sum.integer64}} \code{\link{integer64}}  }
+\examples{
+  cumsum(rep(as.integer64(1), 12))
+  diff(as.integer64(c(0,1:12)))
+  cumsum(as.integer64(c(0, 1:12)))
+  diff(cumsum(as.integer64(c(0,0,1:12))), differences=2)
+}
diff --git a/man/duplicated.integer64.rd b/man/duplicated.integer64.rd
new file mode 100644
index 0000000..4a4ff85
--- /dev/null
+++ b/man/duplicated.integer64.rd
@@ -0,0 +1,43 @@
+\name{duplicated.integer64}
+\alias{duplicated.integer64}
+\title{Determine Duplicate Elements of integer64}
+\description{
+  \code{duplicated()} determines which elements of a vector or data frame are duplicates
+  of elements with smaller subscripts, and returns a logical vector
+  indicating which elements (rows) are duplicates.
+}
+\usage{
+\method{duplicated}{integer64}(x, incomparables = FALSE, nunique = NULL, method = NULL, \dots)
+}
+\arguments{
+  \item{x}{a vector or a data frame or an array or \code{NULL}.}
+  \item{incomparables}{ignored}
+  \item{nunique}{
+	NULL or the number of unique values (including NA). Providing \code{nunique} can speed-up matching when \code{x} has no cache. Note that a wrong nunique can cause undefined behaviour up to a crash.
+}
+  \item{method}{
+	NULL for automatic method selection or a suitable low-level method, see details
+}
+  \item{\dots}{ignored}
+}
+\details{
+  This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+
+  Suitable methods are \code{\link{hashdup}} (hashing), \code{\link{sortorderdup}} (fast ordering) and \code{\link{orderdup}} (memory saving ordering).
+}
+\value{
+    \code{duplicated()}: a logical vector of the same length as \code{x}.  
+}
+\author{
+	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\seealso{ \code{\link{duplicated}}, \code{\link{unique.integer64}}  }
+\examples{
+x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+duplicated(x)
+
+stopifnot(identical(duplicated(x),  duplicated(as.integer(x))))
+}
+\keyword{logic}
+\keyword{manip}
+
diff --git a/man/extract.replace.integer64.rd b/man/extract.replace.integer64.rd
new file mode 100644
index 0000000..e93a7c5
--- /dev/null
+++ b/man/extract.replace.integer64.rd
@@ -0,0 +1,43 @@
+\name{extract.replace.integer64}
+\alias{[.integer64}
+\alias{[[.integer64}
+\alias{[[<-.integer64}
+\alias{[<-.integer64}
+\title{
+   Extract or Replace Parts of an integer64 vector
+}
+\description{
+  Methods to extract and replace parts of an integer64 vector.
+}
+\usage{
+ \method{[}{integer64}(x, \dots)
+ \method{[}{integer64}(x, \dots) <- value 
+ \method{[[}{integer64}(x, \dots)
+ \method{[[}{integer64}(x, \dots) <- value
+}
+\arguments{
+  \item{x}{ an atomic vector }
+  \item{value}{ an atomic vector with values to be assigned }
+  \item{\dots}{ further arguments to the \code{\link{NextMethod}} }
+}
+\note{
+  You should not subscript non-existing elements and not use \code{NA}s as subscripts.
+  The current implementation returns \code{9218868437227407266} instead of \code{NA}.
+}
+\value{
+  A vector or scalar of class 'integer64'
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ classes }
+\keyword{ manip }
+\seealso{ \code{\link{[}} \code{\link{integer64}}  }
+\examples{
+  as.integer64(1:12)[1:3]
+  x <- as.integer64(1:12)
+  dim(x) <- c(3,4)
+  x
+  x[]
+  x[,2:3]
+}
diff --git a/man/format.integer64.rd b/man/format.integer64.rd
new file mode 100644
index 0000000..71aa37d
--- /dev/null
+++ b/man/format.integer64.rd
@@ -0,0 +1,72 @@
+\name{format.integer64}
+\alias{format.integer64}
+\alias{is.na.integer64}
+\alias{is.nan.integer64}
+\alias{is.finite.integer64}
+\alias{is.infinite.integer64}
+\alias{!.integer64}
+\alias{sign.integer64}
+\alias{abs.integer64}
+\alias{sqrt.integer64}
+\alias{log.integer64}
+\alias{log2.integer64}
+\alias{log10.integer64}
+\alias{floor.integer64}
+\alias{ceiling.integer64}
+\alias{trunc.integer64}
+\alias{round.integer64}
+\alias{signif.integer64}
+\alias{scale.integer64}
+\title{
+   Unary operators and functions for integer64 vectors
+}
+\description{
+  Unary operators and functions for integer64 vectors.
+}
+\usage{
+\method{format}{integer64}(x, justify="right", \dots)
+\method{is.na}{integer64}(x)
+\method{is.nan}{integer64}(x)
+\method{is.finite}{integer64}(x)
+\method{is.infinite}{integer64}(x)
+\method{!}{integer64}(x)
+\method{sign}{integer64}(x)
+\method{abs}{integer64}(x)
+\method{sqrt}{integer64}(x)
+\method{log}{integer64}(x, base)
+\method{log2}{integer64}(x)
+\method{log10}{integer64}(x)
+\method{floor}{integer64}(x)
+\method{ceiling}{integer64}(x)
+\method{trunc}{integer64}(x, \dots)
+\method{round}{integer64}(x, digits=0)
+\method{signif}{integer64}(x, digits=6)
+\method{scale}{integer64}(x, center = TRUE, scale = TRUE)
+}
+\arguments{
+  \item{x}{ an atomic vector of class 'integer64'}
+  \item{base}{ an atomic scalar (we save 50\% log-calls by not allowing a vector base) }
+  \item{digits}{ integer indicating the number of decimal places (round) or significant digits (signif) to be used. 
+                 Negative values are allowed (see \code{\link{round}}) }
+  \item{justify}{ should it be right-justified (the default), left-justified, centred or left alone. }
+  \item{center}{ see \code{\link{scale}} }
+  \item{scale}{  see \code{\link{scale}} }
+  \item{\dots}{ further arguments to the \code{\link{NextMethod}} }
+}
+\value{
+  \code{\link{format}} returns a character vector \cr
+  \code{\link{is.na}} and \code{\link{!}} return a logical vector \cr
+  \code{\link{sqrt}}, \code{\link{log}}, \code{\link{log2}} and \code{\link{log10}} return a double vector \cr
+  \code{\link{sign}}, \code{\link{abs}}, \code{\link{floor}}, \code{\link{ceiling}}, \code{\link{trunc}} and 
+  \code{\link{round}} return a vector of class 'integer64' \cr
+  \code{\link{signif}} is not implemented 
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ classes }
+\keyword{ manip }
+\seealso{ \code{\link{xor.integer64}} \code{\link{integer64}}  }
+\examples{
+  sqrt(as.integer64(1:12))
+}
diff --git a/man/hashcache.rd b/man/hashcache.rd
new file mode 100644
index 0000000..50ba210
--- /dev/null
+++ b/man/hashcache.rd
@@ -0,0 +1,58 @@
+\name{hashcache}
+\alias{hashcache}
+\alias{sortcache}
+\alias{sortordercache}
+\alias{ordercache}
+\title{
+		Big caching of hashing, sorting, ordering
+}
+\description{
+	Functions to create cache that accelerates many operations
+}
+\usage{
+hashcache(x, nunique=NULL, \dots)
+sortcache(x, has.na = NULL)
+sortordercache(x, has.na = NULL, stable = NULL)
+ordercache(x, has.na = NULL, stable = NULL, optimize = "time")
+}
+\arguments{
+  \item{x}{
+		an atomic vector (note that currently only integer64 is supported)
+}
+  \item{nunique}{ giving \emph{correct} number of unique elements can help reducing the size of the hashmap }
+  \item{has.na}{
+boolean scalar defining whether the input vector might contain \code{NA}s. If we know we don't have NAs, this may speed-up.
+\emph{Note} that you risk a crash if there are unexpected \code{NA}s with \code{has.na=FALSE}
+}
+  \item{stable}{
+boolean scalar defining whether stable sorting is needed. Allowing non-stable may speed-up.
+}
+  \item{optimize}{
+by default ramsort optimizes for 'time' which requires more RAM,
+set to 'memory' to minimize RAM requirements and sacrifice speed
+}
+  \item{\dots}{
+		passed to \code{\link{hashmap}}
+}
+}
+\details{
+	The result of relatively expensive operations \code{\link{hashmap}}, \code{\link{ramsort}}, \code{\link{ramsortorder}} and \code{\link{ramorder}} can be stored in a cache in order to avoid multiple executions. Unless in very specific situations, the recommended method is \code{hashsortorder} only.
+}
+\note{
+  Note that we consider storing the big results from sorting and/or ordering as a relevant side-effect, 
+and therefore storing them in the cache should require a conscious decision of the user.
+}
+\value{
+	\code{x} with a \code{\link{cache}} that contains the result of the expensive operations, possible together with small derived information (such as \code{\link{nunique.integer64}}) and previously cached results.
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\seealso{
+	\code{\link{cache}} for caching functions and \code{\link{nunique}} for methods benefiting from small caches
+}
+\examples{
+	x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+ sortordercache(x)
+}
+\keyword{ environment }
diff --git a/man/hashmap.rd b/man/hashmap.rd
new file mode 100644
index 0000000..5a2226d
--- /dev/null
+++ b/man/hashmap.rd
@@ -0,0 +1,159 @@
+\name{hashmap}
+\alias{hashfun}
+\alias{hashfun.integer64}
+\alias{hashmap}
+\alias{hashmap.integer64}
+\alias{hashpos}
+\alias{hashpos.cache_integer64}
+\alias{hashrev}
+\alias{hashrev.cache_integer64}
+\alias{hashfin}
+\alias{hashfin.cache_integer64}
+\alias{hashrin}
+\alias{hashrin.cache_integer64}
+\alias{hashdup}
+\alias{hashdup.cache_integer64}
+\alias{hashuni}
+\alias{hashuni.cache_integer64}
+\alias{hashmapuni}
+\alias{hashmapuni.integer64}
+\alias{hashupo}
+\alias{hashupo.cache_integer64}
+\alias{hashmapupo}
+\alias{hashmapupo.integer64}
+\alias{hashtab}
+\alias{hashtab.cache_integer64}
+\alias{hashmaptab}
+\alias{hashmaptab.integer64}
+\title{
+   Hashing for 64bit integers
+}
+\description{
+This is an explicit implementation of hash functionality that underlies 
+matching and other functions in R. Explicit means that you can create, 
+store and use hash functionality directly. One advantage is that you can
+re-use hashmaps, which avoid re-building hashmaps again and again.
+}
+\usage{
+hashfun(x, \dots)
+\method{hashfun}{integer64}(x, minfac=1.41, hashbits=NULL, \dots)
+hashmap(x, \dots)
+\method{hashmap}{integer64}(x, nunique=NULL, minfac=1.41, hashbits=NULL, cache=NULL, \dots)
+hashpos(cache, \dots)
+\method{hashpos}{cache_integer64}(cache, x, nomatch = NA_integer_, \dots)
+hashrev(cache, \dots)
+\method{hashrev}{cache_integer64}(cache, x, nomatch = NA_integer_, \dots)
+hashfin(cache, \dots)
+\method{hashfin}{cache_integer64}(cache, x, \dots)
+hashrin(cache, \dots)
+\method{hashrin}{cache_integer64}(cache, x, \dots)
+hashdup(cache, \dots)
+\method{hashdup}{cache_integer64}(cache, \dots)
+hashuni(cache, \dots)
+\method{hashuni}{cache_integer64}(cache, keep.order=FALSE, \dots)
+hashmapuni(x, \dots)
+\method{hashmapuni}{integer64}(x, nunique=NULL, minfac=1.5, hashbits=NULL, \dots)
+hashupo(cache, \dots)
+\method{hashupo}{cache_integer64}(cache, keep.order=FALSE, \dots)
+hashmapupo(x, \dots)
+\method{hashmapupo}{integer64}(x, nunique=NULL, minfac=1.5, hashbits=NULL, \dots)
+hashtab(cache, \dots)
+\method{hashtab}{cache_integer64}(cache, \dots)
+hashmaptab(x, \dots)
+\method{hashmaptab}{integer64}(x, nunique=NULL, minfac=1.5, hashbits=NULL, \dots)
+}
+\arguments{
+  \item{x}{ an integer64 vector }
+  \item{hashmap}{ an object of class 'hashmap' i.e. here 'cache_integer64' }
+  \item{minfac}{ minimum factor by which the hashmap has more elements compared to the data \code{x}, ignored if \code{hashbits} is given directly }
+  \item{hashbits}{ length of hashmap is \code{2^hashbits} }
+  \item{cache}{ an optional \code{\link{cache}} object into which to put the hashmap (by default a new cache is created)}
+  \item{nunique}{ giving \emph{correct} number of unique elements can help reducing the size of the hashmap }
+  \item{nomatch}{ the value to be returned if an element is not found in the hashmap }
+  \item{keep.order}{ determines order of results and speed: \code{FALSE} (the default) is faster and returns in the (pseudo)random order of the hash function, \code{TRUE} returns in the order of first appearance in the original data, but this requires extra work } 
+  \item{\dots}{ further arguments, passed from generics, ignored in methods }
+}
+\details{
+\tabular{rrl}{
+   \bold{function} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{hashfun} \tab \code{\link[digest]{digest}} \tab export of the hash function used in \code{hashmap} \cr
+   \code{hashmap} \tab \code{\link[=match.integer64]{match}} \tab return hashmap \cr
+   \code{hashpos} \tab \code{\link[=match.integer64]{match}} \tab return positions of \code{x} in \code{hashmap} \cr
+   \code{hashrev} \tab \code{\link[=match.integer64]{match}} \tab return positions of \code{hashmap} in \code{x} \cr
+   \code{hashfin} \tab \code{\link{\%in\%.integer64}} \tab return logical whether \code{x} is in \code{hashmap} \cr
+   \code{hashrin} \tab \code{\link{\%in\%.integer64}} \tab return logical whether \code{hashmap} is in \code{x}  \cr
+   \code{hashdup} \tab \code{\link[=duplicated.integer64]{duplicated}} \tab return logical whether hashdat is duplicated using hashmap\cr
+   \code{hashuni} \tab \code{\link[=unique.integer64]{unique}} \tab return unique values of hashmap  \cr
+   \code{hashmapuni} \tab \code{\link[=unique.integer64]{unique}} \tab return unique values of \code{x}  \cr
+   \code{hashupo} \tab \code{\link[=unique.integer64]{unique}} \tab return positions of unique values in hashdat \cr
+   \code{hashmapupo} \tab \code{\link[=unique.integer64]{unique}} \tab return positions of unique values in \code{x} \cr
+   \code{hashtab} \tab \code{\link[=table.integer64]{table}} \tab tabulate values of hashdat using hashmap in \code{keep.order=FALSE} \cr
+   \code{hashmaptab} \tab \code{\link[=table.integer64]{table}} \tab tabulate values of \code{x} building hashmap on the fly in \code{keep.order=FALSE}\cr
+}
+}
+\value{
+  see details
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ programming }
+\keyword{ manip }
+\seealso{ \code{\link[=match.integer64]{match}} }
+\examples{
+x <- as.integer64(sample(c(NA, 0:9)))
+y <- as.integer64(sample(c(NA, 1:9), 10, TRUE))
+hashfun(y)
+hx <- hashmap(x)
+hy <- hashmap(y)
+ls(hy)
+hashpos(hy, x)
+hashrev(hx, y)
+hashfin(hy, x)
+hashrin(hx, y)
+hashdup(hy)
+hashuni(hy)
+hashuni(hy, keep.order=TRUE)
+hashmapuni(y)
+hashupo(hy)
+hashupo(hy, keep.order=TRUE)
+hashmapupo(y)
+hashtab(hy)
+hashmaptab(y)
+
+stopifnot(identical(match(as.integer(x),as.integer(y)),hashpos(hy, x)))
+stopifnot(identical(match(as.integer(x),as.integer(y)),hashrev(hx, y)))
+stopifnot(identical(as.integer(x) \%in\% as.integer(y), hashfin(hy, x)))
+stopifnot(identical(as.integer(x) \%in\% as.integer(y), hashrin(hx, y)))
+stopifnot(identical(duplicated(as.integer(y)), hashdup(hy)))
+stopifnot(identical(as.integer64(unique(as.integer(y))), hashuni(hy, keep.order=TRUE)))
+stopifnot(identical(sort(hashuni(hy, keep.order=FALSE)), sort(hashuni(hy, keep.order=TRUE))))
+stopifnot(identical(y[hashupo(hy, keep.order=FALSE)], hashuni(hy, keep.order=FALSE)))
+stopifnot(identical(y[hashupo(hy, keep.order=TRUE)], hashuni(hy, keep.order=TRUE)))
+stopifnot(identical(hashpos(hy, hashuni(hy, keep.order=TRUE)), hashupo(hy, keep.order=TRUE)))
+stopifnot(identical(hashpos(hy, hashuni(hy, keep.order=FALSE)), hashupo(hy, keep.order=FALSE)))
+stopifnot(identical(hashuni(hy, keep.order=FALSE), hashtab(hy)$values))
+stopifnot(identical(as.vector(table(as.integer(y), useNA="ifany"))
+, hashtab(hy)$counts[order.integer64(hashtab(hy)$values)]))
+stopifnot(identical(hashuni(hy, keep.order=TRUE), hashmapuni(y)))
+stopifnot(identical(hashupo(hy, keep.order=TRUE), hashmapupo(y)))
+stopifnot(identical(hashtab(hy), hashmaptab(y)))
+
+	\dontrun{
+	message("explore speed given size of the hasmap in 2^hashbits and size of the data")
+	message("more hashbits means more random access and less collisions")
+	message("i.e. more data means less random access and more collisions")
+	bits <- 24
+	b <- seq(-1, 0, 0.1)
+	tim <- matrix(NA, length(b), 2, dimnames=list(b, c("bits","bits+1")))
+    for (i in 1:length(b)){
+	  n <- as.integer(2^(bits+b[i]))
+	  x <- as.integer64(sample(n))
+	  tim[i,1] <- repeat.time(hashmap(x, hashbits=bits))[3]
+	  tim[i,2] <- repeat.time(hashmap(x, hashbits=bits+1))[3]
+	  print(tim)
+      matplot(b, tim)
+	}
+	message("we conclude that n*sqrt(2) is enough to avoid collisions")
+	}
+}
diff --git a/man/identical.integer64.rd b/man/identical.integer64.rd
new file mode 100644
index 0000000..b134b1e
--- /dev/null
+++ b/man/identical.integer64.rd
@@ -0,0 +1,37 @@
+\name{identical.integer64}
+\alias{identical.integer64}
+\title{
+   Identity function for class 'integer64'
+}
+\description{
+  This will discover any deviation between objects containing integer64 vectors. 
+}
+\usage{
+ identical.integer64(x, y, num.eq = FALSE, single.NA = FALSE
+, attrib.as.set = TRUE, ignore.bytecode = TRUE)
+}
+\arguments{
+  \item{x}{ atomic vector of class 'integer64' }
+  \item{y}{ atomic vector of class 'integer64' }
+  \item{num.eq}{ see \code{\link{identical}} }
+  \item{single.NA}{ see \code{\link{identical}} }
+  \item{attrib.as.set}{ see \code{\link{identical}} }
+  \item{ignore.bytecode}{ see \code{\link{identical}} }
+}
+\details{
+  This is simply a wrapper to \code{\link{identical}} with default arguments \code{num.eq = FALSE, single.NA = FALSE}.
+}
+\value{
+  A single logical value, \code{TRUE} or \code{FALSE}, never \code{NA} and never anything other than a single value. 
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ classes }
+\keyword{ manip }
+\seealso{ \code{\link{==.integer64}} \code{\link{identical}} \code{\link{integer64}}  }
+\examples{
+  i64 <- as.double(NA); class(i64) <- "integer64"
+  identical(i64-1, i64+1)
+  identical.integer64(i64-1, i64+1)
+}
diff --git a/man/is.sorted.integer64.rd b/man/is.sorted.integer64.rd
new file mode 100644
index 0000000..fdf87e5
--- /dev/null
+++ b/man/is.sorted.integer64.rd
@@ -0,0 +1,64 @@
+\name{is.sorted.integer64}
+\alias{is.sorted.integer64}
+\alias{na.count.integer64}
+\alias{nvalid.integer64}
+\alias{nunique.integer64}
+\alias{nties.integer64}
+\title{
+	Small cache access methods
+}
+\description{
+	These methods are packaged here for methods in packages \code{bit64} and \code{ff}.
+}
+\usage{
+	\method{is.sorted}{integer64}(x, \dots)
+	\method{na.count}{integer64}(x, \dots)
+	\method{nvalid}{integer64}(x, \dots)
+	\method{nunique}{integer64}(x, \dots)
+	\method{nties}{integer64}(x, \dots)
+}
+\arguments{
+  \item{x}{
+	some object
+	}
+  \item{\dots}{
+	ignored
+	}
+}
+\details{
+  All these functions benefit from a \code{\link{sortcache}}, \code{\link{ordercache}} or \code{\link{sortordercache}}.  
+  \code{na.count}, \code{nvalid} and \code{nunique} also benefit from a \code{\link{hashcache}}.
+	\cr
+	\code{is.sorted} checks for sortedness of \code{x} (NAs sorted first) \cr
+ \code{na.count} returns the number of \code{NA}s \cr 
+ \code{nvalid} returns the number of valid data points, usually \code{\link{length}} minus \code{na.count}. \cr
+ \code{nunique} returns the number of unique values \cr
+ \code{nties} returns the number of tied values. 
+}
+\note{
+	If a \code{\link{cache}} exists but the desired value is not cached, 
+ then these functions will store their result in the cache. 
+ We do not consider this a relevant side-effect, 
+ since these small cache results do not have a relevant memory footprint.
+}
+\value{
+	\code{is.sorted} returns a logical scalar, the other methods return an integer scalar.
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\seealso{
+	\code{\link{cache}} for caching functions and \code{\link{sortordercache}} for functions creating big caches
+}
+\examples{
+	x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+ length(x)
+ na.count(x)
+ nvalid(x)
+ nunique(x)
+ nties(x)
+ table.integer64(x)
+ x
+}
+\keyword{ environment }
+\keyword{ methods }
diff --git a/man/keypos.rd b/man/keypos.rd
new file mode 100644
index 0000000..068e555
--- /dev/null
+++ b/man/keypos.rd
@@ -0,0 +1,42 @@
+\name{keypos}
+\alias{keypos}
+\alias{keypos.integer64}
+\title{Extract Positions in redundant dimension table}
+\description{
+  \code{keypos} returns the positions of the (fact table) elements that participate in their sorted unique subset (dimension table)
+}
+\usage{
+keypos(x, \dots)
+\method{keypos}{integer64}(x, method = NULL, \dots)
+}
+\arguments{
+  \item{x}{a vector or a data frame or an array or \code{NULL}.}
+  \item{method}{
+	NULL for automatic method selection or a suitable low-level method, see details
+}
+  \item{\dots}{ignored}
+}
+\details{
+  NAs are sorted first in the dimension table, see \code{\link{ramorder.integer64}}.
+  \cr
+  This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+  Suitable methods are \code{\link{sortorderkey}} (fast ordering) 
+and \code{\link{orderkey}} (memory saving ordering).
+}
+\value{
+  an integer vector of the same length as \code{x} containing positions relative to \code{sort(unique(x), na.last=FALSE)}
+}
+\author{
+	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\seealso{
+  \code{\link{unique.integer64}} for the unique subset and \code{\link{match.integer64}} for finding positions in a different vector.
+}
+\examples{
+x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+keypos(x)
+
+stopifnot(identical(keypos(x),  match.integer64(x, sort(unique(x), na.last=FALSE))))
+}
+\keyword{manip}
+\keyword{univar}
diff --git a/man/match.integer64.rd b/man/match.integer64.rd
new file mode 100644
index 0000000..44fd6ce
--- /dev/null
+++ b/man/match.integer64.rd
@@ -0,0 +1,125 @@
+\name{match.integer64}
+\alias{match.integer64}
+\alias{\%in\%.integer64}
+\title{
+64-bit integer matching
+}
+\description{
+\code{match} returns a vector of the positions of (first) matches of its first argument in its second. 
+
+\code{\%in\%} is a more intuitive interface as a binary operator, which returns a logical vector indicating if there is a match or not for its left operand. 
+
+}
+\usage{
+\method{match}{integer64}(x, table, nomatch = NA_integer_, nunique = NULL, method = NULL, ...)
+\method{\%in\%}{integer64}(x, table, ...)
+}
+\arguments{
+  \item{x}{
+	integer64 vector: the values to be matched, optionally carrying a cache created with \code{\link{hashcache}}
+}
+  \item{table}{
+	integer64 vector: the values to be matched against, optionally carrying a cache created with \code{\link{hashcache}} or \code{\link{sortordercache}}
+}
+  \item{nomatch}{
+  the value to be returned in the case when no match is found. Note that it is coerced to integer.
+}
+  \item{nunique}{
+	NULL or the number of unique values of table (including NA). Providing \code{nunique} can speed-up matching when \code{table} has no cache. Note that a wrong nunique can cause undefined behaviour up to a crash.
+}
+  \item{method}{
+	NULL for automatic method selection or a suitable low-level method, see details
+}
+  \item{\dots}{
+ignored
+}
+}
+\details{
+  These functions automatically choose from several low-level functions considering the size of \code{x} and \code{table} and the availability of caches. 
+
+
+  Suitable methods for \code{match.integer64} are \code{\link{hashpos}} (hash table lookup), \code{\link{hashrev}} (reverse lookup), \code{\link{sortorderpos}} (fast ordering) and \code{\link{orderpos}} (memory saving ordering).
+  Suitable methods for \code{\%in\%.integer64} are \code{\link{hashfin}} (hash table lookup), \code{\link{hashrin}} (reverse lookup), \code{\link{sortfin}} (fast sorting) and \code{\link{orderfin}} (memory saving ordering).
+}
+\value{
+  A vector of the same length as \code{x}.
+
+  \code{match}: An integer vector giving the position in \code{table} of
+  the first match if there is a match, otherwise \code{nomatch}.
+
+  If \code{x[i]} is found to equal \code{table[j]} then the value
+  returned in the \code{i}-th position of the return value is \code{j},
+  for the smallest possible \code{j}.  If no match is found, the value
+  is \code{nomatch}.
+
+  \code{\%in\%}: A logical vector, indicating if a match was located for
+  each element of \code{x}: thus the values are \code{TRUE} or
+  \code{FALSE} and never \code{NA}.
+}
+\author{
+	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\seealso{
+	\code{\link{match}}
+}
+\examples{
+x <- as.integer64(c(NA, 0:9), 32)
+table <- as.integer64(c(1:9, NA))
+match.integer64(x, table)
+"\%in\%.integer64"(x, table)
+
+x <- as.integer64(sample(c(rep(NA, 9), 0:9), 32, TRUE))
+table <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+stopifnot(identical(match.integer64(x, table), match(as.integer(x), as.integer(table))))
+stopifnot(identical("\%in\%.integer64"(x, table), as.integer(x) \%in\% as.integer(table)))
+
+\dontrun{
+	message("check when reverse hash-lookup beats standard hash-lookup")
+	e <- 4:24
+	timx <- timy <- matrix(NA, length(e), length(e), dimnames=list(e,e))
+	for (iy in seq_along(e))
+	for (ix in 1:iy){
+		nx <- 2^e[ix]
+		ny <- 2^e[iy]
+		x <- as.integer64(sample(ny, nx, FALSE))
+		y <- as.integer64(sample(ny, ny, FALSE))
+		#hashfun(x, bits=as.integer(5))
+		timx[ix,iy] <- repeat.time({
+		hx <- hashmap(x)
+		py <- hashrev(hx, y)
+		})[3]
+		timy[ix,iy] <- repeat.time({
+		hy <- hashmap(y)
+		px <- hashpos(hy, x)
+		})[3]
+		#identical(px, py)
+		print(round(timx[1:iy,1:iy]/timy[1:iy,1:iy], 2), na.print="")
+	}
+
+	message("explore best low-level method given size of x and table")
+	B1 <- 1:27
+	B2 <- 1:27
+	tim <- array(NA, dim=c(length(B1), length(B2), 5)
+ , dimnames=list(B1, B2, c("hashpos","hashrev","sortpos1","sortpos2","sortpos3")))
+	for (i1 in B1)
+	for (i2 in B2)
+	{
+	  b1 <- B1[i1]
+	  b2 <- B1[i2]
+	  n1 <- 2^b1
+	  n2 <- 2^b2
+	  x1 <- as.integer64(c(sample(n2, n1-1, TRUE), NA))
+	  x2 <- as.integer64(c(sample(n2, n2-1, TRUE), NA))
+	  tim[i1,i2,1] <- repeat.time({h <- hashmap(x2);hashpos(h, x1);rm(h)})[3]
+	  tim[i1,i2,2] <- repeat.time({h <- hashmap(x1);hashrev(h, x2);rm(h)})[3]
+	  s <- clone(x2); o <- seq_along(s); ramsortorder(s, o)
+	  tim[i1,i2,3] <- repeat.time(sortorderpos(s, o, x1, method=1))[3]
+	  tim[i1,i2,4] <- repeat.time(sortorderpos(s, o, x1, method=2))[3]
+	  tim[i1,i2,5] <- repeat.time(sortorderpos(s, o, x1, method=3))[3]
+	  rm(s,o)
+	  print(apply(tim, 1:2, function(ti)if(any(is.na(ti)))NA else which.min(ti)))
+	}
+}
+}
+\keyword{manip}
+\keyword{logic}
diff --git a/man/optimizer64.data.rd b/man/optimizer64.data.rd
new file mode 100644
index 0000000..d7aecc7
--- /dev/null
+++ b/man/optimizer64.data.rd
@@ -0,0 +1,103 @@
+\name{optimizer64.data}
+\alias{optimizer64.data}
+\docType{data}
+\title{
+ Results of performance measurement on a Core i7 Lenovo T410 8 GB RAM under Windows 7 64bit
+}
+\description{
+  These are the results of calling \code{\link{optimizer64}}
+}
+\usage{data(optimizer64.data)}
+\format{
+  The format is:
+List of 16
+ $ : num [1:9, 1:3] 0 0 1.63 0.00114 2.44 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:9] "match" "match.64" "hashpos" "hashrev" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:10, 1:3] 0 0 0 1.62 0.00114 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:10] "\%in\%" "match.64" "\%in\%.64" "hashfin" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:10, 1:3] 0 0 0.00105 0.00313 0.00313 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:10] "duplicated" "duplicated.64" "hashdup" "sortorderdup1" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:15, 1:3] 0 0 0 0.00104 0.00104 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:15] "unique" "unique.64" "hashmapuni" "hashuni" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:14, 1:3] 0 0 0 0.000992 0.000992 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:14] "unique" "unipos.64" "hashmapupo" "hashupo" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:13, 1:3] 0 0 0 0 0.000419 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:13] "tabulate" "table" "table.64" "hashmaptab" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:7, 1:3] 0 0 0 0.00236 0.00714 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:7] "rank" "rank.keep" "rank.64" "sortorderrnk" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:6, 1:3] 0 0 0.00189 0.00714 0 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:6] "quantile" "quantile.64" "sortqtl" "orderqtl" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:9, 1:3] 0 0 0.00105 1.17 0 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:9] "match" "match.64" "hashpos" "hashrev" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:10, 1:3] 0 0 0 0.00104 1.18 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:10] "\%in\%" "match.64" "\%in\%.64" "hashfin" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:10, 1:3] 0 0 1.64 2.48 2.48 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:10] "duplicated" "duplicated.64" "hashdup" "sortorderdup1" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:15, 1:3] 0 0 0 1.64 1.64 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:15] "unique" "unique.64" "hashmapuni" "hashuni" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:14, 1:3] 0 0 0 1.62 1.62 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:14] "unique" "unipos.64" "hashmapupo" "hashupo" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:13, 1:3] 0 0 0 0 0.32 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:13] "tabulate" "table" "table.64" "hashmaptab" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:7, 1:3] 0 0 0 2.96 10.69 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:7] "rank" "rank.keep" "rank.64" "sortorderrnk" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ $ : num [1:6, 1:3] 0 0 1.62 10.61 0 ...
+  ..- attr(*, "dimnames")=List of 2
+  .. ..$ : chr [1:6] "quantile" "quantile.64" "sortqtl" "orderqtl" ...
+  .. ..$ : chr [1:3] "prep" "both" "use"
+ - attr(*, "dim")= int [1:2] 8 2
+ - attr(*, "dimnames")=List of 2
+  ..$ : chr [1:8] "match" "\%in\%" "duplicated" "unique" ...
+  ..$ : chr [1:2] "65536" "33554432"
+}
+\examples{
+data(optimizer64.data)
+print(optimizer64.data)
+oldpar <- par(no.readonly = TRUE)
+par(mfrow=c(2,1))
+par(cex=0.7)
+for (i in 1:nrow(optimizer64.data)){
+ for (j in 1:2){
+   tim <- optimizer64.data[[i,j]]
+  barplot(t(tim))
+  if (rownames(optimizer64.data)[i]=="match")
+   title(paste("match", colnames(optimizer64.data)[j], "in", colnames(optimizer64.data)[3-j]))
+  else if (rownames(optimizer64.data)[i]=="\%in\%")
+   title(paste(colnames(optimizer64.data)[j], "\%in\%", colnames(optimizer64.data)[3-j]))
+  else
+   title(paste(rownames(optimizer64.data)[i], colnames(optimizer64.data)[j]))
+ }
+}
+par(mfrow=c(1,1))
+}
+\keyword{datasets}
diff --git a/man/plusclass.rd b/man/plusclass.rd
new file mode 100644
index 0000000..aff6933
--- /dev/null
+++ b/man/plusclass.rd
@@ -0,0 +1,33 @@
+\name{plusclass}
+\alias{plusclass}
+\alias{minusclass}
+\title{
+   integer64: Maintaining S3 class attribute
+}
+\description{
+  Maintaining integer64 S3 class attribute.
+}
+\usage{
+  plusclass(class, whichclass)
+  minusclass(class, whichclass)
+}
+\arguments{
+  \item{class}{ NULL or a character vector of class attributes }
+  \item{whichclass}{ the (single) class name to add or remove from the class vector  }
+}
+\value{
+  NULL or a character vector of class attributes
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ classes }
+\keyword{ manip }
+\keyword{ internal }
+\seealso{ 
+  \code{\link{oldClass}} \code{\link{integer64}}  
+}
+\examples{
+  plusclass("inheritingclass","integer64")
+  minusclass(c("inheritingclass","integer64"), "integer64")
+}
diff --git a/man/prank.rd b/man/prank.rd
new file mode 100644
index 0000000..40f53e1
--- /dev/null
+++ b/man/prank.rd
@@ -0,0 +1,39 @@
+\name{prank}
+\alias{prank}
+\alias{prank.integer64}
+\title{(P)ercent (Rank)s}
+\description{
+	Function \code{prank.integer64}  projects the values [min..max] via ranks [1..n] to [0..1]. 
+	\code{\link{qtile.integer64}} is the inverse function of 'prank.integer64' and projects [0..1] to [min..max].
+}
+\usage{
+	prank(x, \dots)
+	\method{prank}{integer64}(x, method = NULL, \dots)
+}
+\arguments{
+  \item{x}{a integer64 vector}
+  \item{method}{
+	NULL for automatic method selection or a suitable low-level method, see details
+}
+  \item{\dots}{ignored}
+}
+\details{
+	Function \code{prank.integer64} is based on \code{\link{rank.integer64}}.
+}
+\value{
+  \code{prank} returns a numeric vector of the same length as \code{x}.
+}
+\author{
+	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\seealso{
+  \code{\link{rank.integer64}} for simple ranks and \code{\link{qtile}} for the inverse function quantiles.
+}
+\examples{
+x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+prank(x)
+
+x <- x[!is.na(x)]
+stopifnot(identical(x,  unname(qtile(x, probs=prank(x)))))
+}
+\keyword{univar}
diff --git a/man/qtile.rd b/man/qtile.rd
new file mode 100644
index 0000000..134394c
--- /dev/null
+++ b/man/qtile.rd
@@ -0,0 +1,79 @@
+\name{qtile}
+\alias{qtile}
+\alias{qtile.integer64}
+\alias{quantile.integer64}
+\alias{median.integer64}
+\alias{mean.integer64}
+\alias{summary.integer64}
+\title{(Q)uan(Tile)s }
+\description{
+	Function \code{\link{prank.integer64}}  projects the values [min..max] via ranks [1..n] to [0..1]. 
+	\code{qtile.integer64} is the inverse function of 'prank.integer64' and projects [0..1] to [min..max].
+}
+\usage{
+	qtile(x, probs=seq(0, 1, 0.25), \dots)
+	\method{qtile}{integer64}(x, probs = seq(0, 1, 0.25), names = TRUE, method = NULL, \dots)
+	\method{quantile}{integer64}(x, probs = seq(0, 1, 0.25), na.rm = FALSE, names = TRUE, type=0L, \dots)
+ \method{mean}{integer64}(x, na.rm = FALSE, \dots)
+	\method{summary}{integer64}(object, \dots)
+ ## mean(x, na.rm = FALSE, ...)
+ ## or
+ ## mean(x, na.rm = FALSE)
+}
+\arguments{
+  \item{x}{a integer64 vector}
+  \item{object}{a integer64 vector}
+  \item{probs}{
+		numeric vector of probabilities with values in [0,1] - possibly containing \code{NA}s
+}
+  \item{names}{
+	logical; if \code{TRUE}, the result has a \code{names} attribute. Set to \code{FALSE} for speedup with many probs.
+}
+  \item{type}{
+	an integer selecting the quantile algorithm, currently only 0 is supported, see details
+}
+  \item{method}{
+	NULL for automatic method selection or a suitable low-level method, see details
+}
+  \item{na.rm}{
+	logical; if \code{TRUE}, any \code{NA} and \code{NaN}'s are removed from \code{x} before the quantiles are computed.
+}
+  \item{\dots}{ignored}
+}
+\details{
+ Functions \code{quantile.integer64} with \code{type=0} and \code{median.integer64} are convenience wrappers to \code{qtile}.
+ \cr
+	Function \code{qtile} behaves very similar to \code{quantile.default} with \code{type=1} 
+ in that it only returns existing values, it is mostly symmetric 
+ but it is using 'round' rather than 'floor'. 
+ \cr
+ Note that this implies that \code{median.integer64} does not interpolate for even number of values 
+(interpolation would create values that could not be represented as 64-bit integers).
+ \cr
+  This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+  Suitable methods are \code{\link{sortqtl}} (fast sorting) 
+and \code{\link{orderqtl}} (memory saving ordering).
+}
+\value{
+  \code{prank} returns a numeric vector of the same length as \code{x}.
+  \cr
+  \code{qtile} returns a vector with elements from \code{x} 
+  at the relative positions specified by \code{probs}.
+}
+\author{
+	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\seealso{
+  \code{\link{rank.integer64}} for simple ranks and \code{\link{quantile}} for quantiles.
+}
+\examples{
+x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+qtile(x, probs=seq(0, 1, 0.25))
+quantile(x, probs=seq(0, 1, 0.25), na.rm=TRUE)
+median(x, na.rm=TRUE)
+summary(x)
+
+x <- x[!is.na(x)]
+stopifnot(identical(x,  unname(qtile(x, probs=prank(x)))))
+}
+\keyword{univar}
diff --git a/man/ramsort.integer64.rd b/man/ramsort.integer64.rd
new file mode 100644
index 0000000..1758d12
--- /dev/null
+++ b/man/ramsort.integer64.rd
@@ -0,0 +1,117 @@
+\name{ramsort.integer64}
+\alias{ramsort.integer64}
+\alias{shellsort.integer64}
+\alias{quicksort.integer64}
+\alias{mergesort.integer64}
+\alias{radixsort.integer64}
+\alias{ramorder.integer64}
+\alias{shellorder.integer64}
+\alias{quickorder.integer64}
+\alias{mergeorder.integer64}
+\alias{radixorder.integer64}
+\alias{ramsortorder.integer64}
+\alias{shellsortorder.integer64}
+\alias{quicksortorder.integer64}
+\alias{mergesortorder.integer64}
+\alias{radixsortorder.integer64}
+\title{
+   Low-level integer64 methods for in-RAM sorting and ordering
+}
+\description{
+  Fast low-level methods for sorting and ordering. 
+  The \code{..sortorder} methods do sorting and ordering at once, which requires more RAM than ordering but is (almost) as fast as sorting.
+}
+\note{
+ Note that these methods purposely violate the functional programming paradigm: they are called for the side-effect of changing some of their arguments.
+ The \code{sort}-methods change \code{x}, the \code{order}-methods change \code{i}, and the \code{sortorder}-methods change both \code{x} and \code{i}.
+}
+\usage{
+\method{shellsort}{integer64}(x, has.na=TRUE, na.last=FALSE, decreasing=FALSE, \dots)
+\method{shellsortorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, \dots)
+\method{shellorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, \dots)
+\method{mergesort}{integer64}(x, has.na=TRUE, na.last=FALSE, decreasing=FALSE, \dots)
+\method{mergeorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, \dots)
+\method{mergesortorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, \dots)
+\method{quicksort}{integer64}(x, has.na=TRUE, na.last=FALSE, decreasing=FALSE
+, restlevel=floor(1.5*log2(length(x))), \dots)
+\method{quicksortorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE
+, restlevel=floor(1.5*log2(length(x))), \dots)
+\method{quickorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE
+, restlevel=floor(1.5*log2(length(x))), \dots)
+\method{radixsort}{integer64}(x, has.na=TRUE, na.last=FALSE, decreasing=FALSE, radixbits=8L, \dots)
+\method{radixsortorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, radixbits=8L, \dots)
+\method{radixorder}{integer64}(x, i, has.na=TRUE, na.last=FALSE, decreasing=FALSE, radixbits=8L, \dots)
+\method{ramsort}{integer64}(x, has.na = TRUE, na.last=FALSE, decreasing = FALSE, stable = TRUE
+, optimize = c("time", "memory"), VERBOSE = FALSE, \dots)
+\method{ramsortorder}{integer64}(x, i, has.na = TRUE, na.last=FALSE, decreasing = FALSE, stable = TRUE
+, optimize = c("time", "memory"), VERBOSE = FALSE, \dots)
+\method{ramorder}{integer64}(x, i, has.na = TRUE, na.last=FALSE, decreasing = FALSE, stable = TRUE
+, optimize = c("time", "memory"), VERBOSE = FALSE, \dots)
+}
+\arguments{
+  \item{x}{ a vector to be sorted by \code{\link{ramsort}} and \code{\link{ramsortorder}}, i.e. the output of  \code{\link{sort}} }
+  \item{i}{ integer positions to be modified by \code{\link{ramorder}} and \code{\link{ramsortorder}}, default is 1:n, in this case the output is similar to \code{\link{order}} }
+  \item{has.na}{
+boolean scalar defining whether the input vector might contain \code{NA}s. If we know we don't have NAs, this may speed-up.
+\emph{Note} that you risk a crash if there are unexpected \code{NA}s with \code{has.na=FALSE}
+}
+  \item{na.last}{
+boolean scalar telling ramsort whether to sort \code{NA}s last or first.
+\emph{Note} that 'boolean' means that there is no third option \code{NA} as in \code{\link{sort}}
+}
+  \item{decreasing}{
+boolean scalar telling ramsort whether to sort increasing or decreasing
+}
+  \item{stable}{
+boolean scalar defining whether stable sorting is needed. Allowing non-stable may speed-up.
+}
+  \item{optimize}{
+by default ramsort optimizes for 'time' which requires more RAM,
+set to 'memory' to minimize RAM requirements and sacrifice speed
+}
+  \item{restlevel}{
+number of remaining recursion levels before \code{quicksort} switches from recursing to \code{shellsort}
+}
+  \item{radixbits}{
+	size of radix in bits
+}
+  \item{VERBOSE}{
+  cat some info about chosen method
+}
+  \item{\dots}{ further arguments, passed from generics, ignored in methods }
+}
+\details{
+ see \code{\link[bit]{ramsort}}
+}
+\value{
+  These functions return the number of \code{NAs} found or assumed during sorting
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ programming }
+\keyword{ manip }
+\seealso{ \code{\link{ramsort}} for the generic, \code{\link[ff]{ramsort.default}} for the methods provided by package \code{\link[ff]{ff}}, \code{\link{sort.integer64}} for the sort interface and \code{\link{sortcache}} for caching the work of sorting}
+\examples{
+  x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+  x
+  message("ramsort example")
+  s <- clone(x)
+  ramsort(s)
+  message("s has been changed in-place - whether or not ramsort uses an in-place algorithm")
+  s
+  message("ramorder example")
+  s <- clone(x)
+  o <- seq_along(s)
+  ramorder(s, o)
+  message("o has been changed in-place - s remains unchanged")
+  s
+  o
+  s[o]
+  message("ramsortorder example")
+  o <- seq_along(s)
+  ramsortorder(s, o)
+  message("s and o have both been changed in-place - this is much faster")
+  s
+  o
+}
diff --git a/man/rank.integer64.rd b/man/rank.integer64.rd
new file mode 100644
index 0000000..4ec0cf6
--- /dev/null
+++ b/man/rank.integer64.rd
@@ -0,0 +1,39 @@
+\name{rank.integer64}
+\alias{rank.integer64}
+\title{Sample Ranks from integer64}
+\description{
+  Returns the sample ranks of the values in a vector.  Ties (i.e., equal
+  values) are averaged and missing values propagated.
+}
+\usage{
+	\method{rank}{integer64}(x, method = NULL, \dots)
+}
+\arguments{
+  \item{x}{a integer64 vector}
+  \item{method}{
+	NULL for automatic method selection or a suitable low-level method, see details
+}
+  \item{\dots}{ignored}
+}
+\details{
+  This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+  Suitable methods are \code{\link{sortorderrnk}} (fast ordering) 
+and \code{\link{orderrnk}} (memory saving ordering).
+}
+\value{
+  A numeric vector of the same length as \code{x}.
+}
+\author{
+	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\seealso{
+  \code{\link{order.integer64}}, \code{\link{rank}} and \code{\link{prank}} for percent rank.
+}
+\examples{
+x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+rank.integer64(x)
+
+stopifnot(identical(rank.integer64(x),  rank(as.integer(x)
+, na.last="keep", ties.method = "average")))
+}
+\keyword{univar}
diff --git a/man/rep.integer64.rd b/man/rep.integer64.rd
new file mode 100644
index 0000000..ced8aeb
--- /dev/null
+++ b/man/rep.integer64.rd
@@ -0,0 +1,31 @@
+\name{rep.integer64}
+\alias{rep.integer64}
+\title{
+   Replicate elements of integer64 vectors
+}
+\description{
+  Replicate elements of integer64 vectors
+}
+\usage{
+\method{rep}{integer64}(x, \dots)
+}
+\arguments{
+  \item{x}{ a vector of 'integer64' to be replicated }
+  \item{\dots}{ further arguments passed to \code{\link{NextMethod}} }
+}
+\value{
+  \code{\link{rep}} returns a integer64 vector
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ classes }
+\keyword{ manip }
+\seealso{ \code{\link{c.integer64}} \code{\link{rep.integer64}} 
+          \code{\link{as.data.frame.integer64}} \code{\link{integer64}}  
+}
+\examples{
+  rep(as.integer64(1:2), 6)
+  rep(as.integer64(1:2), c(6,6))
+  rep(as.integer64(1:2), length.out=6)
+}
diff --git a/man/seq.integer64.rd b/man/seq.integer64.rd
new file mode 100644
index 0000000..8c82040
--- /dev/null
+++ b/man/seq.integer64.rd
@@ -0,0 +1,43 @@
+\name{seq.integer64}
+\alias{seq.integer64}
+\title{
+   integer64: Sequence Generation
+}
+\description{
+  Generating sequence of integer64 values
+}
+\usage{
+\method{seq}{integer64}(from = NULL, to = NULL, by = NULL, length.out = NULL, along.with = NULL, \dots)
+}
+\arguments{
+  \item{from}{ integer64 scalar (in order to dispatch the integer64 method of \code{\link{seq}}) }
+  \item{to}{ scalar }
+  \item{by}{ scalar }
+  \item{length.out}{ scalar }
+  \item{along.with}{ scalar }
+  \item{\dots}{ ignored }
+}
+\details{
+  \code{seq.integer64} does coerce its arguments 'from', 'to' and 'by' to \code{integer64}.
+  If not provided, the argument 'by' is automatically determined as \code{+1} or \code{-1},
+  but the size of 'by' is not calculated as in \code{\link{seq}} (because this might result in a non-integer value).
+}
+\value{
+  an integer64 vector with the generated sequence
+}
+\note{
+  In base R \code{\link{:}} currently is not generic and does not dispatch, see section "Limitations inherited from Base R" in \code{\link{integer64}}
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ classes }
+\keyword{ manip }
+\seealso{ \code{\link{c.integer64}} \code{\link{rep.integer64}} 
+          \code{\link{as.data.frame.integer64}} \code{\link{integer64}}  
+}
+\examples{
+  # colon not activated: as.integer64(1):12
+  seq(as.integer64(1), 12, 2)
+  seq(as.integer64(1), by=2, length.out=6)
+}
diff --git a/man/sort.integer64.rd b/man/sort.integer64.rd
new file mode 100644
index 0000000..c3350aa
--- /dev/null
+++ b/man/sort.integer64.rd
@@ -0,0 +1,63 @@
+\name{sort.integer64}
+\alias{sort.integer64}
+\alias{order.integer64}
+\title{
+   High-level integer64 methods for sorting and ordering
+}
+\description{
+  Fast high-level methods for sorting and ordering. 
+  These are wrappers to \code{\link{ramsort}} and friends and do not modify their arguments.
+}
+\usage{
+\method{sort}{integer64}(x, decreasing = FALSE, has.na = TRUE, na.last = TRUE, stable = TRUE
+, optimize = c("time", "memory"), VERBOSE = FALSE, \dots)
+\method{order}{integer64}(\dots, na.last = TRUE, decreasing = FALSE, has.na = TRUE, stable = TRUE
+, optimize = c("time", "memory"), VERBOSE = FALSE)
+}
+\arguments{
+  \item{x}{ a vector to be sorted by \code{\link{ramsort}} and \code{\link{ramsortorder}}, i.e. the output of  \code{\link{sort}} }
+  \item{has.na}{
+boolean scalar defining whether the input vector might contain \code{NA}s. If we know we don't have NAs, this may speed-up.
+\emph{Note} that you risk a crash if there are unexpected \code{NA}s with \code{has.na=FALSE}
+}
+  \item{na.last}{
+boolean scalar telling ramsort whether to sort \code{NA}s last or first.
+\emph{Note} that 'boolean' means that there is no third option \code{NA} as in \code{\link{sort}}
+}
+  \item{decreasing}{
+boolean scalar telling ramsort whether to sort increasing or decreasing
+}
+  \item{stable}{
+boolean scalar defining whether stable sorting is needed. Allowing non-stable may speed-up.
+}
+  \item{optimize}{
+by default ramsort optimizes for 'time' which requires more RAM,
+set to 'memory' to minimize RAM requirements and sacrifice speed
+}
+  \item{VERBOSE}{
+  cat some info about chosen method
+}
+  \item{\dots}{ further arguments, passed from generics, ignored in methods }
+}
+\details{
+ see \code{\link{sort}} and \code{\link{order}}
+}
+\value{
+  \code{sort} returns the sorted vector and \code{order} returns the order positions. 
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ programming }
+\keyword{ manip }
+\seealso{ \code{\link[=sort.integer64]{sort}}, \code{\link{sortcache}} }
+\examples{
+  x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+  x
+  sort(x)
+  message("the following has default optimize='time' which is faster but requires more RAM
+, this calls 'ramorder'")
+  order.integer64(x)
+  message("slower with less RAM, this calls 'ramsortorder'")
+  order.integer64(x, optimize="memory")
+}
diff --git a/man/sortnut.rd b/man/sortnut.rd
new file mode 100644
index 0000000..96834c1
--- /dev/null
+++ b/man/sortnut.rd
@@ -0,0 +1,159 @@
+\name{sortnut}
+\alias{sortnut}
+\alias{sortnut.integer64}
+\alias{ordernut}
+\alias{ordernut.integer64}
+\alias{sortfin}
+\alias{sortfin.integer64}
+\alias{orderpos}
+\alias{orderpos.integer64}
+\alias{orderfin}
+\alias{orderfin.integer64}
+\alias{sortorderpos}
+\alias{sortorderpos.integer64}
+\alias{orderdup}
+\alias{orderdup.integer64}
+\alias{sortorderdup}
+\alias{sortorderdup.integer64}
+\alias{sortuni}
+\alias{sortuni.integer64}
+\alias{orderuni}
+\alias{orderuni.integer64}
+\alias{sortorderuni}
+\alias{sortorderuni.integer64}
+\alias{orderupo}
+\alias{orderupo.integer64}
+\alias{sortorderupo}
+\alias{sortorderupo.integer64}
+\alias{ordertie}
+\alias{ordertie.integer64}
+\alias{sortordertie}
+\alias{sortordertie.integer64}
+\alias{sorttab}
+\alias{sorttab.integer64}
+\alias{ordertab}
+\alias{ordertab.integer64}
+\alias{sortordertab}
+\alias{sortordertab.integer64}
+\alias{orderkey}
+\alias{orderkey.integer64}
+\alias{sortorderkey}
+\alias{sortorderkey.integer64}
+\alias{orderrnk}
+\alias{orderrnk.integer64}
+\alias{sortorderrnk}
+\alias{sortorderrnk.integer64}
+\alias{sortqtl}
+\alias{sortqtl.integer64}
+\alias{orderqtl}
+\alias{orderqtl.integer64}
+\title{
+   Searching and other uses of sorting for 64bit integers
+}
+\description{
+  This is roughly an implementation of hash functionality, but based on sorting instead of on a hashmap.
+  Since sorting is more informative than hashing, we can do some more interesting things.
+}
+\usage{
+sortnut(sorted, \dots)
+ordernut(table, order, \dots)
+sortfin(sorted, x, \dots)
+orderfin(table, order, x, \dots)
+orderpos(table, order, x, \dots)
+sortorderpos(sorted, order, x, \dots)
+orderdup(table, order, \dots)
+sortorderdup(sorted, order, \dots)
+sortuni(sorted, nunique, \dots)
+orderuni(table, order, nunique, \dots)
+sortorderuni(table, sorted, order, nunique, \dots)
+orderupo(table, order, nunique, \dots)
+sortorderupo(sorted, order, nunique, keep.order = FALSE, \dots)
+ordertie(table, order, nties, \dots)
+sortordertie(sorted, order, nties, \dots)
+sorttab(sorted, nunique, \dots)
+ordertab(table, order, nunique, \dots)
+sortordertab(sorted, order, \dots)
+orderkey(table, order, na.skip.num = 0L, \dots)
+sortorderkey(sorted, order, na.skip.num = 0L, \dots)
+orderrnk(table, order, na.count, \dots)
+sortorderrnk(sorted, order, na.count, \dots)
+\method{sortnut}{integer64}(sorted, \dots)
+\method{ordernut}{integer64}(table, order, \dots)
+\method{sortfin}{integer64}(sorted, x, method=NULL, \dots)
+\method{orderfin}{integer64}(table, order, x, method=NULL, \dots)
+\method{orderpos}{integer64}(table, order, x, nomatch=NA, method=NULL, \dots)
+\method{sortorderpos}{integer64}(sorted, order, x, nomatch=NA, method=NULL, \dots)
+\method{orderdup}{integer64}(table, order, method=NULL, \dots)
+\method{sortorderdup}{integer64}(sorted, order, method=NULL, \dots)
+\method{sortuni}{integer64}(sorted, nunique, \dots)
+\method{orderuni}{integer64}(table, order, nunique, keep.order=FALSE, \dots)
+\method{sortorderuni}{integer64}(table, sorted, order, nunique, \dots)
+\method{orderupo}{integer64}(table, order, nunique, keep.order=FALSE, \dots)
+\method{sortorderupo}{integer64}(sorted, order, nunique, keep.order = FALSE, \dots)
+\method{ordertie}{integer64}(table, order, nties, \dots)
+\method{sortordertie}{integer64}(sorted, order, nties, \dots)
+\method{sorttab}{integer64}(sorted, nunique, \dots)
+\method{ordertab}{integer64}(table, order, nunique, denormalize=FALSE, keep.order=FALSE, \dots)
+\method{sortordertab}{integer64}(sorted, order, denormalize=FALSE, \dots)
+\method{orderkey}{integer64}(table, order, na.skip.num = 0L, \dots)
+\method{sortorderkey}{integer64}(sorted, order, na.skip.num = 0L, \dots)
+\method{orderrnk}{integer64}(table, order, na.count, \dots)
+\method{sortorderrnk}{integer64}(sorted, order, na.count, \dots)
+\method{sortqtl}{integer64}(sorted, na.count, probs, \dots)
+\method{orderqtl}{integer64}(table, order, na.count, probs, \dots)
+}
+\arguments{
+  \item{x}{ an \code{\link{integer64}} vector }
+  \item{sorted}{ a sorted \code{\link{integer64}} vector }
+  \item{table}{ the original data with original order under the sorted vector }
+  \item{order}{ an \code{\link{integer}} order vector that turns 'table' into 'sorted' }
+  \item{nunique}{ number of unique elements, usually we get this from cache or call \code{sortnut} or \code{ordernut} }
+  \item{nties}{ number of tied values, usually we get this from cache or call \code{sortnut} or \code{ordernut} }
+  \item{denormalize}{ FALSE returns counts of unique values, TRUE returns each value with its counts }
+  \item{nomatch}{ the value to be returned if an element is not found in the hashmap }
+  \item{keep.order}{ determines order of results and speed: \code{FALSE} (the default) is faster and returns in sorted order, \code{TRUE} returns in the order of first appearance in the original data, but this requires extra work } 
+  \item{probs}{ vector of probabilities in [0..1] for which we seek quantiles }
+  \item{na.skip.num}{ 0 or the number of \code{NA}s. With 0, \code{NA}s are coded with 1L, with the number of \code{NA}s, these are coded with \code{NA}, the latter needed for \code{\link{as.factor.integer64}} }
+  \item{na.count}{ the number of \code{NA}s, needed for this low-level function algorithm }
+  \item{method}{ see details }
+  \item{\dots}{ further arguments, passed from generics, ignored in methods }
+}
+\details{
+\tabular{rrrrl}{
+   \bold{sortfun} \tab \bold{orderfun} \tab \bold{sortorderfun} \tab \bold{see also}          \tab \bold{description} \cr
+   \code{sortnut} \tab \code{ordernut} \tab                     \tab  \tab return number of tied and of unique values \cr
+   \code{sortfin} \tab \code{orderfin} \tab                     \tab \code{\link{\%in\%.integer64}} \tab return logical whether \code{x} is in \code{table} \cr
+                  \tab \code{orderpos} \tab \code{sortorderpos} \tab \code{\link[=match.integer64]{match}} \tab return positions of \code{x} in \code{table} \cr
+                  \tab \code{orderdup} \tab \code{sortorderdup} \tab \code{\link[=duplicated.integer64]{duplicated}} \tab return logical whether values are duplicated \cr
+   \code{sortuni} \tab \code{orderuni} \tab \code{sortorderuni} \tab \code{\link[=unique.integer64]{unique}} \tab return unique values (=dimensiontable) \cr
+                  \tab \code{orderupo} \tab \code{sortorderupo} \tab \code{\link[=unique.integer64]{unique}} \tab return positions of unique values \cr
+                  \tab \code{ordertie} \tab \code{sortordertie} \tab  \tab return positions of tied values \cr
+                  \tab \code{orderkey} \tab \code{sortorderkey} \tab  \tab positions of values in vector of unique values (match in dimensiontable) \cr
+   \code{sorttab} \tab \code{ordertab} \tab \code{sortordertab} \tab \code{\link[=table.integer64]{table}} \tab tabulate frequency of values  \cr
+                  \tab \code{orderrnk} \tab \code{sortorderrnk} \tab  \tab rank averaging ties \cr
+   \code{sortqtl} \tab \code{orderqtl} \tab                     \tab  \tab return quantiles given probabilities \cr
+}
+The functions \code{sortfin}, \code{orderfin}, \code{orderpos} and \code{sortorderpos} each offer three algorithms for finding \code{x} in \code{table}.  \cr
+With \code{method=1L} each value of \code{x} is searched independently using \emph{binary search}, this is fastest for small \code{table}s. \cr
+With \code{method=2L} the values of \code{x} are first sorted and then searched using \emph{doubly exponential search}, this is the best allround method. \cr
+With \code{method=3L} the values of \code{x} are first sorted and then searched using simple merging, this is the fastest method if \code{table} is huge and \code{x} has similar size and distribution of values. \cr
+With \code{method=NULL} the functions use a heuristic to determine the fastest algorithm. \cr
+
+The functions \code{orderdup} and \code{sortorderdup} each offer two algorithms for setting the truth values in the return vector.  \cr
+With \code{method=1L} the return values are set directly which causes random write access on a possibly large return vector. \cr
+With \code{method=2L} the return values are first set in a smaller bit-vector -- random access limited to a smaller memory region -- and finally written sequentially to the logical output  vector. \cr
+With \code{method=NULL} the functions use a heuristic to determine the fastest algorithm. \cr
+}
+\value{
+  see details
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ programming }
+\keyword{ manip }
+\seealso{ \code{\link[=match.integer64]{match}} }
+\examples{
+ message("check the code of 'optimizer64' for examples:")
+ print(optimizer64)
+}
diff --git a/man/sum.integer64.rd b/man/sum.integer64.rd
new file mode 100644
index 0000000..ba5c44b
--- /dev/null
+++ b/man/sum.integer64.rd
@@ -0,0 +1,53 @@
+\name{sum.integer64}
+\alias{all.integer64}
+\alias{any.integer64}
+\alias{min.integer64}
+\alias{max.integer64}
+\alias{range.integer64}
+\alias{lim.integer64}
+\alias{sum.integer64}
+\alias{prod.integer64}
+\title{
+   Summary functions for integer64 vectors
+}
+\description{
+  Summary functions for integer64 vectors. 
+  Function 'range' without arguments returns the smallest and largest value of the 'integer64' class.
+}
+\usage{
+\method{all}{integer64}(\dots, na.rm = FALSE)
+\method{any}{integer64}(\dots, na.rm = FALSE)
+\method{min}{integer64}(\dots, na.rm = FALSE)
+\method{max}{integer64}(\dots, na.rm = FALSE)
+\method{range}{integer64}(\dots, na.rm = FALSE)
+lim.integer64()
+\method{sum}{integer64}(\dots, na.rm = FALSE)
+\method{prod}{integer64}(\dots, na.rm = FALSE)
+}
+\arguments{
+  \item{\dots}{ atomic vectors of class 'integer64'}
+  \item{na.rm}{ logical scalar indicating whether to ignore NAs }
+}
+\details{
+  The numerical summary methods always return \code{integer64}. 
+  Therefore the methods for \code{min},\code{max} and \code{range} do not return \code{+Inf,-Inf}
+  on empty arguments, but \code{+9223372036854775807, -9223372036854775807} (in this sequence).
+  The same is true if only  \code{NA}s are submitted with argument \code{na.rm=TRUE}. 
+ \cr
+  \code{lim.integer64} returns these limits in proper order \code{-9223372036854775807, +9223372036854775807} and without a \code{\link{warning}}.
+}
+\value{
+  \code{\link{all}} and \code{\link{any}} return a logical scalar\cr
+  \code{\link{range}} returns a integer64 vector with two elements\cr
+  \code{\link{min}}, \code{\link{max}}, \code{\link{sum}} and \code{\link{prod}} return a integer64 scalar
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ classes }
+\keyword{ manip }
+\seealso{ \code{\link{mean.integer64}} \code{\link{cumsum.integer64}} \code{\link{integer64}}  }
+\examples{
+  lim.integer64()
+  range(as.integer64(1:12))
+}
diff --git a/man/table.integer64.rd b/man/table.integer64.rd
new file mode 100644
index 0000000..381e312
--- /dev/null
+++ b/man/table.integer64.rd
@@ -0,0 +1,120 @@
+\name{table.integer64}
+\title{Cross Tabulation and Table Creation for integer64}
+\alias{table.integer64}
+
+\concept{counts}
+\concept{frequencies}
+\concept{occurrences}
+\concept{contingency table}
+
+\description{
+  \code{table.integer64} uses the cross-classifying integer64 vectors to build a contingency
+  table of the counts at each combination of vector values.
+}
+\usage{
+table.integer64(\dots
+, return = c("table","data.frame","list")
+, order = c("values","counts")
+, nunique = NULL
+, method = NULL
+, dnn = list.names(...), deparse.level = 1
+) 
+}
+\arguments{
+  \item{\dots}{one or more objects which can be interpreted as factors
+    (including character strings), or a list (or data frame) whose
+    components can be so interpreted.  (For \code{as.table} and
+    \code{as.data.frame}, arguments passed to specific methods.)}
+  \item{nunique}{
+	NULL or the number of unique values of table (including NA). Providing \code{nunique} can speed-up matching when \code{table} has no cache. Note that a wrong nunique can cause undefined behaviour up to a crash.
+}
+  \item{order}{
+	By default results are created sorted by "values", or by "counts"
+}
+  \item{method}{
+	NULL for automatic method selection or a suitable low-level method, see details
+}
+  \item{return}{
+     choose the return format, see details
+}
+  \item{dnn}{the names to be given to the dimensions in the result (the
+    \emph{dimnames names}).}
+  \item{deparse.level}{controls how the default \code{dnn} is
+    constructed.  See \sQuote{Details}.}
+}
+\details{
+  This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+  Suitable methods are \code{\link{hashmaptab}} (simultaneously creating and using a hashmap)
+, \code{\link{hashtab}} (first creating a hashmap then using it)
+, \code{\link{sortordertab}} (fast ordering) 
+and \code{\link{ordertab}} (memory saving ordering).
+\cr
+  If the argument \code{dnn} is not supplied, the internal function
+  \code{list.names} is called to compute the \sQuote{dimname names}.  If the
+  arguments in \code{\dots} are named, those names are used.  For the
+  remaining arguments, \code{deparse.level = 0} gives an empty name,
+  \code{deparse.level = 1} uses the supplied argument if it is a symbol,
+  and \code{deparse.level = 2} will deparse the argument.
+
+  Arguments \code{exclude}, \code{useNA}, are not supported, i.e. \code{NA}s are always tabulated, and, different from \code{\link{table}} they are sorted first if \code{order="values"}. 
+}
+\value{
+  By default (with \code{return="table"}) \code{\link{table}} returns a \emph{contingency table}, an object of
+  class \code{"table"}, an array of integer values. Note that unlike S the result is always an array, a 1D array if one factor is given. Note also that for multidimensional arrays this is a \emph{dense} return structure which can dramatically increase RAM requirements (for large arrays with high mutual information, i.e. many possible input combinations of which only few occur) and that \code{\link{table}} is limited to \code{2^31} possible combinations (e.g. two input vectors with 46340  [...]
+  \cr
+  You can use the other \code{return=} options to cope with these problems, the potential combination limit is increased from \code{2^31} to \code{2^63} with these options, RAM is only required for observed combinations and string conversion is avoided. 
+  \cr
+  With \code{return="data.frame"} you get a \emph{dense} representation as a \code{\link{data.frame}} (like that resulting from \code{as.data.frame(table(...))}) where only observed combinations are listed (each as a data.frame row) with the corresponding frequency counts (the latter as component
+  named by \code{responseName}).  This is the inverse of \code{\link{xtabs}}.
+  \cr
+  With \code{return="list"} you also get a \emph{dense} representation as a simple \code{\link{list}} with components 
+  \item{values }{a integer64 vector of the technically tabulated values, for 1D this is the tabulated values themselves, for kD these are the values representing the potential combinations of input values}
+  \item{counts}{the frequency counts}
+  \item{dims}{only for kD: a list with the vectors of the unique values of the input dimensions}
+}
+\note{
+  Note that by using \code{\link{as.integer64.factor}} we can also input 
+  factors into \code{table.integer64} -- only the \code{\link{levels}} get lost.
+ \cr
+  Note that because of the existence of \code{\link{as.factor.integer64}} 
+the standard \code{\link{table}} function -- within its limits -- can also be used 
+for \code{\link{integer64}}, and especially for combining \code{\link{integer64}} input 
+with other data types.
+}
+\seealso{
+  \code{\link{table}} for more info on the standard version coping with Base R's data types, \code{\link{tabulate}} which can faster tabulate \code{\link{integer}s} with a limited range \code{[1L .. nL not too big]}, \code{\link{unique.integer64}} for the unique values without counting them and \code{\link{unipos.integer64}} for the positions of the unique values. 
+}
+\examples{
+message("pure integer64 examples")
+x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+y <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+z <- sample(c(rep(NA, 9), letters), 32, TRUE)
+table.integer64(x)
+table.integer64(x, order="counts")
+table.integer64(x, y)
+table.integer64(x, y, return="data.frame")
+
+message("via as.integer64.factor we can use 'table.integer64' also for factors")
+table.integer64(x, as.integer64(as.factor(z)))
+
+message("via as.factor.integer64 we can also use 'table' for integer64")
+table(x)
+table(x, exclude=NULL)
+table(x, z, exclude=NULL)
+
+\dontshow{
+ stopifnot(identical(table.integer64(as.integer64(c(1,1,2))), table(c(1,1,2))))
+ stopifnot(identical(table.integer64(as.integer64(c(1,1,2)),as.integer64(c(3,4,4))), table(c(1,1,2),c(3,4,4))))
+ message("the following works with three warnings due to coercion")
+ stopifnot(identical(table.integer64(c(1,1,2)), table(c(1,1,2))))
+ stopifnot(identical(table.integer64(as.integer64(c(1,1,2)),c(3,4,4)), table(c(1,1,2),c(3,4,4))))
+ stopifnot(identical(table.integer64(c(1,1,2),as.integer64(c(3,4,4))), table(c(1,1,2),c(3,4,4))))
+ message("the following works because of as.factor.integer64")
+ stopifnot(identical(table(as.integer64(c(1,1,2))), table(c(1,1,2))))  
+ stopifnot(identical(table(as.integer64(c(1,1,2)),as.integer64(c(3,4,4))), table(c(1,1,2),c(3,4,4))))
+ stopifnot(identical(table(as.integer64(c(1,1,2)),c(3,4,4)), table(c(1,1,2),c(3,4,4))))
+ stopifnot(identical(table(c(1,1,2),as.integer64(c(3,4,4))), table(c(1,1,2),c(3,4,4))))
+}
+
+}
+\keyword{category}
diff --git a/man/tiepos.rd b/man/tiepos.rd
new file mode 100644
index 0000000..abdb2ac
--- /dev/null
+++ b/man/tiepos.rd
@@ -0,0 +1,43 @@
+\name{tiepos}
+\alias{tiepos}
+\alias{tiepos.integer64}
+\title{Extract Positions of Tied Elements}
+\description{
+  \code{tiepos} returns the positions of those elements that participate in ties.
+}
+\usage{
+tiepos(x, \dots)
+\method{tiepos}{integer64}(x, nties = NULL, method = NULL, \dots)
+}
+\arguments{
+  \item{x}{a vector or a data frame or an array or \code{NULL}.}
+  \item{nties}{
+	NULL or the number of tied values (including NA). Providing \code{nties} can speed up processing when \code{x} has no cache. Note that a wrong \code{nties} can cause undefined behaviour, up to a crash.
+}
+  \item{method}{
+	NULL for automatic method selection or a suitable low-level method, see details
+}
+  \item{\dots}{ignored}
+}
+\details{
+  This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+  Suitable methods are \code{\link{sortordertie}} (fast ordering) 
+and \code{\link{ordertie}} (memory saving ordering).
+}
+\value{
+  an integer vector of positions
+}
+\author{
+	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\seealso{
+  \code{\link{rank.integer64}} for possibly tied ranks and \code{\link{unipos.integer64}} for positions of unique values.
+}
+\examples{
+x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+tiepos(x)
+
+stopifnot(identical(tiepos(x),  (1:length(x))[duplicated(x) | rev(duplicated(rev(x)))]))
+}
+\keyword{manip}
+\keyword{univar}
diff --git a/man/unipos.rd b/man/unipos.rd
new file mode 100644
index 0000000..5030a31
--- /dev/null
+++ b/man/unipos.rd
@@ -0,0 +1,57 @@
+\name{unipos}
+\alias{unipos}
+\alias{unipos.integer64}
+\title{Extract Positions of Unique Elements}
+\description{
+  \code{unipos} returns the positions of those elements returned by \code{\link{unique}}.
+}
+\usage{
+unipos(x, incomparables = FALSE, order = c("original","values","any"), \dots)
+\method{unipos}{integer64}(x, incomparables = FALSE, order = c("original","values","any")
+, nunique = NULL, method = NULL, \dots)
+}
+\arguments{
+  \item{x}{a vector or a data frame or an array or \code{NULL}.}
+  \item{incomparables}{ignored}
+  \item{order}{The order in which positions of unique values will be returned, see details}
+  \item{nunique}{
+	NULL or the number of unique values (including NA). Providing \code{nunique} can speed up processing when \code{x} has no cache. Note that a wrong \code{nunique} can cause undefined behaviour, up to a crash.
+}
+  \item{method}{
+	NULL for automatic method selection or a suitable low-level method, see details
+}
+  \item{\dots}{ignored}
+}
+\details{
+  This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+  Suitable methods are \code{\link{hashmapupo}} (simultaneously creating and using a hashmap)
+, \code{\link{hashupo}} (first creating a hashmap then using it)
+, \code{\link{sortorderupo}} (fast ordering) 
+and \code{\link{orderupo}} (memory saving ordering).
+\cr
+The default \code{order="original"} collects unique values in the order of the first appearance in \code{x} like in \code{\link{unique}}, this costs extra processing. 
+\code{order="values"} collects unique values in sorted order like in \code{\link{table}}, this costs extra processing with the hash methods but comes for free. 
+\code{order="any"} collects unique values in undefined order, possibly faster. For hash methods this will be a quasi random order, for sort methods this will be sorted order.
+}
+\value{
+  an integer vector of positions
+}
+\author{
+	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\seealso{
+  \code{\link{unique.integer64}} for unique values and \code{\link{match.integer64}} for general matching.
+}
+\examples{
+x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+unipos(x)
+unipos(x, order="values")
+
+stopifnot(identical(unipos(x),  (1:length(x))[!duplicated(x)]))
+stopifnot(identical(unipos(x),  match.integer64(unique(x), x)))
+stopifnot(identical(unipos(x, order="values"),  match.integer64(unique(x, order="values"), x)))
+stopifnot(identical(unique(x),  x[unipos(x)]))
+stopifnot(identical(unique(x, order="values"),  x[unipos(x, order="values")]))
+}
+\keyword{manip}
+\keyword{logic}
diff --git a/man/unique.integer64.rd b/man/unique.integer64.rd
new file mode 100644
index 0000000..b98fe12
--- /dev/null
+++ b/man/unique.integer64.rd
@@ -0,0 +1,58 @@
+\name{unique.integer64}
+\alias{unique.integer64}
+\title{Extract Unique Elements from integer64}
+\description{
+  \code{unique} returns a vector like \code{x} but with duplicate elements/rows removed.
+}
+\usage{
+\method{unique}{integer64}(x, incomparables = FALSE, order = c("original","values","any")
+, nunique = NULL, method = NULL, \dots)
+}
+\arguments{
+  \item{x}{a vector or a data frame or an array or \code{NULL}.}
+  \item{incomparables}{ignored}
+  \item{order}{The order in which unique values will be returned, see details}
+  \item{nunique}{
+	NULL or the number of unique values (including NA). Providing \code{nunique} can speed up matching when \code{x} has no cache. Note that a wrong \code{nunique} can cause undefined behaviour, up to a crash.
+}
+  \item{method}{
+	NULL for automatic method selection or a suitable low-level method, see details
+}
+  \item{\dots}{ignored}
+}
+\details{
+  This function automatically chooses from several low-level functions considering the size of \code{x} and the availability of a cache. 
+  Suitable methods are \code{\link{hashmapuni}} (simultaneously creating and using a hashmap)
+, \code{\link{hashuni}} (first creating a hashmap then using it)
+, \code{\link{sortuni}} (fast sorting for sorted order only)
+, \code{\link{sortorderuni}} (fast ordering for original order only) 
+and \code{\link{orderuni}} (memory saving ordering).
+\cr
+The default \code{order="original"} returns unique values in the order of the first appearance in \code{x} like in \code{\link{unique}}, this costs extra processing. 
+\code{order="values"} returns unique values in sorted order like in \code{\link{table}}, this costs extra processing with the hash methods but comes for free. 
+\code{order="any"} returns unique values in undefined order, possibly faster. For hash methods this will be a quasi random order, for sort methods this will be sorted order.
+}
+\value{
+  For a vector, an object of the same type of \code{x}, but with only
+  one copy of each duplicated element.  No attributes are copied (so
+  the result has no names).
+}
+\author{
+	Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\seealso{
+  \code{\link{unique}} for the generic, \code{\link{unipos}} which gives the indices of the unique
+  elements and \code{\link{table.integer64}} which gives frequencies of the unique elements.
+}
+\examples{
+x <- as.integer64(sample(c(rep(NA, 9), 1:9), 32, TRUE))
+unique(x)
+unique(x, order="values")
+
+stopifnot(identical(unique(x),  x[!duplicated(x)]))
+stopifnot(identical(unique(x),  as.integer64(unique(as.integer(x)))))
+stopifnot(identical(unique(x, order="values")
+,  as.integer64(sort(unique(as.integer(x)), na.last=FALSE))))
+}
+\keyword{manip}
+\keyword{logic}
diff --git a/man/xor.integer64.rd b/man/xor.integer64.rd
new file mode 100644
index 0000000..53f9e63
--- /dev/null
+++ b/man/xor.integer64.rd
@@ -0,0 +1,65 @@
+\name{xor.integer64}
+\alias{&.integer64}
+\alias{|.integer64}
+\alias{xor.integer64}
+\alias{!=.integer64}
+\alias{==.integer64}
+\alias{<.integer64}
+\alias{<=.integer64}
+\alias{>.integer64}
+\alias{>=.integer64}
+\alias{+.integer64}
+\alias{-.integer64}
+\alias{*.integer64}
+\alias{^.integer64}
+\alias{/.integer64}
+\alias{\%/\%.integer64}
+\alias{\%\%.integer64}
+\alias{binattr}
+\title{
+   Binary operators for integer64 vectors
+}
+\description{
+  Binary operators for integer64 vectors.
+}
+\usage{
+\method{&}{integer64}(e1,e2)
+\method{|}{integer64}(e1,e2)
+\method{xor}{integer64}(x,y)
+\method{!=}{integer64}(e1,e2)
+\method{==}{integer64}(e1,e2)
+\method{<}{integer64}(e1,e2)
+\method{<=}{integer64}(e1,e2)
+\method{>}{integer64}(e1,e2)
+\method{>=}{integer64}(e1,e2)
+\method{+}{integer64}(e1,e2)
+\method{-}{integer64}(e1,e2)
+\method{*}{integer64}(e1,e2)
+\method{^}{integer64}(e1,e2)
+\method{/}{integer64}(e1,e2)
+\method{\%/\%}{integer64}(e1,e2)
+\method{\%\%}{integer64}(e1,e2)
+binattr(e1,e2) # for internal use only
+}
+\arguments{
+  \item{e1}{ an atomic vector of class 'integer64'}
+  \item{e2}{ an atomic vector of class 'integer64'}
+  \item{x}{ an atomic vector of class 'integer64'}
+  \item{y}{ an atomic vector of class 'integer64'}
+}
+\value{
+  \code{\link{&}}, \code{\link{|}}, \code{\link{xor}}, \code{\link{!=}}, \code{\link{==}}, 
+  \code{\link{<}}, \code{\link{<=}}, \code{\link{>}}, \code{\link{>=}} return a logical vector \cr
+  \code{\link{^}} and \code{\link{/}} return a double vector\cr
+  \code{\link{+}}, \code{\link{-}}, \code{\link{*}}, \code{\link{\%/\%}}, \code{\link{\%\%}}
+   return a vector of class 'integer64'
+}
+\author{
+Jens Oehlschlägel <Jens.Oehlschlaegel at truecluster.com>
+}
+\keyword{ classes }
+\keyword{ manip }
+\seealso{ \code{\link{format.integer64}} \code{\link{integer64}}  }
+\examples{
+  as.integer64(1:12) - 1
+}
diff --git a/src/Makevars b/src/Makevars
new file mode 100644
index 0000000..3dfdd26
--- /dev/null
+++ b/src/Makevars
@@ -0,0 +1 @@
+#PKG_CFLAGS=-pedantic -O4
diff --git a/src/Makevars.win b/src/Makevars.win
new file mode 100644
index 0000000..071c865
--- /dev/null
+++ b/src/Makevars.win
@@ -0,0 +1 @@
+#PKG_CFLAGS=-pedantic -O3
diff --git a/src/bsearch.c b/src/bsearch.c
new file mode 100644
index 0000000..78f747a
--- /dev/null
+++ b/src/bsearch.c
@@ -0,0 +1,267 @@
+/*
+# C-Code for binary search
+# (c) 2011 Jens Oehlschlägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+*/
+
+#include "bsearch.h"
+
+IndexT integer64_bsearch_asc_EQ(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BSEARCH_ASC_EQ(data, l, r, value, return )
+}
+
+IndexT integer64_bsearch_asc_GE(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BSEARCH_ASC_GE(data, l, r, value, return )
+}
+
+IndexT integer64_bsearch_asc_GT(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BSEARCH_ASC_GT(data, l, r, value, return )
+}
+
+IndexT integer64_bsearch_asc_LE(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BSEARCH_ASC_LE(data, l, r, value, return )
+}
+
+IndexT integer64_bsearch_asc_LT(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BSEARCH_ASC_LT(data, l, r, value, return )
+}
+
+
+IndexT integer64_bsearch_desc_EQ(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BSEARCH_DESC_EQ(data, l, r, value, return )
+}
+
+IndexT integer64_bsearch_desc_GE(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BSEARCH_DESC_GE(data, l, r, value, return )
+}
+
+IndexT integer64_bsearch_desc_GT(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BSEARCH_DESC_GT(data, l, r, value, return )
+}
+
+IndexT integer64_bsearch_desc_LE(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BSEARCH_DESC_LE(data, l, r, value, return )
+}
+
+IndexT integer64_bsearch_desc_LT(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BSEARCH_DESC_LT(data, l, r, value, return )
+}
+
+
+
+IndexT integer64_lsearch_asc_EQ(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LSEARCH_ASC_EQ(data, l, r, value, return )
+}
+
+IndexT integer64_lsearch_asc_GE(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LSEARCH_ASC_GE(data, l, r, value, return )
+}
+
+IndexT integer64_lsearch_asc_GT(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LSEARCH_ASC_GT(data, l, r, value, return )
+}
+
+IndexT integer64_lsearch_asc_LE(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LSEARCH_ASC_LE(data, l, r, value, return )
+}
+
+IndexT integer64_lsearch_asc_LT(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LSEARCH_ASC_LT(data, l, r, value, return )
+}
+
+
+IndexT integer64_lsearch_desc_EQ(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LSEARCH_DESC_EQ(data, l, r, value, return )
+}
+
+IndexT integer64_lsearch_desc_GE(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LSEARCH_DESC_GE(data, l, r, value, return )
+}
+
+IndexT integer64_lsearch_desc_GT(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LSEARCH_DESC_GT(data, l, r, value, return )
+}
+
+IndexT integer64_lsearch_desc_LE(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LSEARCH_DESC_LE(data, l, r, value, return )
+}
+
+IndexT integer64_lsearch_desc_LT(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LSEARCH_DESC_LT(data, l, r, value, return )
+}
+
+
+
+IndexT integer64_rsearch_asc_EQ(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_RSEARCH_ASC_EQ(data, l, r, value, return )
+}
+
+IndexT integer64_rsearch_asc_GE(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_RSEARCH_ASC_GE(data, l, r, value, return )
+}
+
+IndexT integer64_rsearch_asc_GT(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_RSEARCH_ASC_GT(data, l, r, value, return )
+}
+
+IndexT integer64_rsearch_asc_LE(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_RSEARCH_ASC_LE(data, l, r, value, return )
+}
+
+IndexT integer64_rsearch_asc_LT(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_RSEARCH_ASC_LT(data, l, r, value, return )
+}
+
+
+IndexT integer64_rsearch_desc_EQ(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_RSEARCH_DESC_EQ(data, l, r, value, return )
+}
+
+IndexT integer64_rsearch_desc_GE(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_RSEARCH_DESC_GE(data, l, r, value, return )
+}
+
+IndexT integer64_rsearch_desc_GT(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_RSEARCH_DESC_GT(data, l, r, value, return )
+}
+
+IndexT integer64_rsearch_desc_LE(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_RSEARCH_DESC_LE(data, l, r, value, return )
+}
+
+IndexT integer64_rsearch_desc_LT(ValueT *data, IndexT l, IndexT r, ValueT value){
+  INTEGER64_RSEARCH_DESC_LT(data, l, r, value, return )
+}
+
+
+
+
+IndexT integer64_bosearch_asc_EQ(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BOSEARCH_ASC_EQ(data, index, l, r, value, return)
+}
+
+IndexT integer64_bosearch_asc_GE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BOSEARCH_ASC_GE(data, index, l, r, value, return)
+}
+
+IndexT integer64_bosearch_asc_GT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BOSEARCH_ASC_GT(data, index, l, r, value, return)
+}
+
+IndexT integer64_bosearch_asc_LE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BOSEARCH_ASC_LE(data, index, l, r, value, return)
+}
+
+IndexT integer64_bosearch_asc_LT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BOSEARCH_ASC_LT(data, index, l, r, value, return)
+}
+
+
+IndexT integer64_bosearch_desc_EQ(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BOSEARCH_DESC_EQ(data, index, l, r, value, return)
+}
+
+IndexT integer64_bosearch_desc_GE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BOSEARCH_DESC_GE(data, index, l, r, value, return)
+}
+
+IndexT integer64_bosearch_desc_GT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BOSEARCH_DESC_GT(data, index, l, r, value, return)
+}
+
+IndexT integer64_bosearch_desc_LE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BOSEARCH_DESC_LE(data, index, l, r, value, return)
+}
+
+IndexT integer64_bosearch_desc_LT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_BOSEARCH_DESC_LT(data, index, l, r, value, return)
+}
+
+
+
+IndexT integer64_losearch_asc_EQ(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LOSEARCH_ASC_EQ(data, index, l, r, value, return)
+}
+
+IndexT integer64_losearch_asc_GE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LOSEARCH_ASC_GE(data, index, l, r, value, return)
+}
+
+IndexT integer64_losearch_asc_GT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LOSEARCH_ASC_GT(data, index, l, r, value, return)
+}
+
+IndexT integer64_losearch_asc_LE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LOSEARCH_ASC_LE(data, index, l, r, value, return)
+}
+
+IndexT integer64_losearch_asc_LT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LOSEARCH_ASC_LT(data, index, l, r, value, return)
+}
+
+
+IndexT integer64_losearch_desc_EQ(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LOSEARCH_DESC_EQ(data, index, l, r, value, return)
+}
+
+IndexT integer64_losearch_desc_GE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LOSEARCH_DESC_GE(data, index, l, r, value, return)
+}
+
+IndexT integer64_losearch_desc_GT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LOSEARCH_DESC_GT(data, index, l, r, value, return)
+}
+
+IndexT integer64_losearch_desc_LE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LOSEARCH_DESC_LE(data, index, l, r, value, return)
+}
+
+IndexT integer64_losearch_desc_LT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_LOSEARCH_DESC_LT(data, index, l, r, value, return)
+}
+
+
+
+IndexT integer64_rosearch_asc_EQ(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_ROSEARCH_ASC_EQ(data, index, l, r, value, return)
+}
+
+IndexT integer64_rosearch_asc_GE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_ROSEARCH_ASC_GE(data, index, l, r, value, return)
+}
+
+IndexT integer64_rosearch_asc_GT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_ROSEARCH_ASC_GT(data, index, l, r, value, return)
+}
+
+IndexT integer64_rosearch_asc_LE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_ROSEARCH_ASC_LE(data, index, l, r, value, return)
+}
+
+IndexT integer64_rosearch_asc_LT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_ROSEARCH_ASC_LT(data, index, l, r, value, return)
+}
+
+
+IndexT integer64_rosearch_desc_EQ(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_ROSEARCH_DESC_EQ(data, index, l, r, value, return)
+}
+
+IndexT integer64_rosearch_desc_GE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_ROSEARCH_DESC_GE(data, index, l, r, value, return)
+}
+
+IndexT integer64_rosearch_desc_GT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_ROSEARCH_DESC_GT(data, index, l, r, value, return)
+}
+
+IndexT integer64_rosearch_desc_LE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_ROSEARCH_DESC_LE(data, index, l, r, value, return)
+}
+
+IndexT integer64_rosearch_desc_LT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value){
+  INTEGER64_ROSEARCH_DESC_LT(data, index, l, r, value, return)
+}
diff --git a/src/bsearch.h b/src/bsearch.h
new file mode 100644
index 0000000..b01b604
--- /dev/null
+++ b/src/bsearch.h
@@ -0,0 +1,1245 @@
+/*
+# C-Header for binary search
+# (c) 2011 Jens Oehlschlägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+*/
+
+#include "sort64.h"
+
+#define INTEGER64_BSEARCH_ASC_DOWN(data, l, r, value) \
+{ \
+IndexT m; \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (LESS(data[m], value)) \
+      l = m + 1; \
+    else \
+      r = m; \
+  } \
+}
+#define INTEGER64_BSEARCH_ASC_UP(data, l, r, value) \
+{ \
+IndexT m; \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (LESS(value, data[m])) \
+      r = m; \
+    else \
+      l = m + 1; \
+  } \
+}
+
+#define INTEGER64_LSEARCH_ASC_DOWN(data, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = l - 1 + d; \
+     m = l + ((r - l) / 2); \
+     if (g<m){ \
+       if (LESS(data[g], value)){ \
+         l = g + 1; \
+         d *= 2; \
+       }else{ \
+         r = g; \
+         break; \
+       } \
+     }else{ \
+       if (LESS(data[m], value)) \
+         l = m + 1; \
+       else \
+         r = m; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (LESS(data[m], value)) \
+      l = m + 1; \
+    else \
+      r = m; \
+  } \
+}
+#define INTEGER64_LSEARCH_ASC_UP(data, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = l - 1 + d; \
+     m = l + ((r - l) / 2); \
+     if (g<m){ \
+       if (LESS(value, data[g])){ \
+         r = g; \
+         break; \
+       }else{ \
+         l = g + 1; \
+         d *= 2; \
+       } \
+     }else{ \
+       if (LESS(value, data[m])) \
+         r = m; \
+       else \
+         l = m + 1; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (LESS(value, data[m])) \
+      r = m; \
+    else \
+      l = m + 1; \
+  } \
+}
+
+
+#define INTEGER64_RSEARCH_ASC_DOWN(data, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = r - d; \
+     m = l + ((r - l) / 2); \
+     if (g>m){ \
+       if (LESS(data[g], value)){ \
+         l = g + 1; \
+         break; \
+       }else{ \
+         r = g; \
+         d *= 2; \
+       } \
+     }else{ \
+       if (LESS(data[m], value)) \
+         l = m + 1; \
+       else \
+         r = m; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (LESS(data[m], value)) \
+      l = m + 1; \
+    else \
+      r = m; \
+  } \
+}
+#define INTEGER64_RSEARCH_ASC_UP(data, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = r - d; \
+     m = l + ((r - l) / 2); \
+     if (g>m){ \
+       if (LESS(value, data[g])){ \
+         r = g; \
+         d *= 2; \
+       }else{ \
+         l = g + 1; \
+         break; \
+       } \
+     }else{ \
+       if (LESS(value, data[m])) \
+         r = m; \
+       else \
+         l = m + 1; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (LESS(value, data[m])) \
+      r = m; \
+    else \
+      l = m + 1; \
+  } \
+}
+
+// desc is a clone of asc with LESS replaced by GREATER
+
+#define INTEGER64_BSEARCH_DESC_DOWN(data, l, r, value) \
+{ \
+IndexT m; \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (GREATER(data[m], value)) \
+      l = m + 1; \
+    else \
+      r = m; \
+  } \
+}
+#define INTEGER64_BSEARCH_DESC_UP(data, l, r, value) \
+{ \
+IndexT m; \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (GREATER(value, data[m])) \
+      r = m; \
+    else \
+      l = m + 1; \
+  } \
+}
+
+#define INTEGER64_LSEARCH_DESC_DOWN(data, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = l - 1 + d; \
+     m = l + ((r - l) / 2); \
+     if (g<m){ \
+       if (GREATER(data[g], value)){ \
+         l = g + 1; \
+         d *= 2; \
+       }else{ \
+         r = g; \
+         break; \
+       } \
+     }else{ \
+       if (GREATER(data[m], value)) \
+         l = m + 1; \
+       else \
+         r = m; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (GREATER(data[m], value)) \
+      l = m + 1; \
+    else \
+      r = m; \
+  } \
+}
+#define INTEGER64_LSEARCH_DESC_UP(data, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = l - 1 + d; \
+     m = l + ((r - l) / 2); \
+     if (g<m){ \
+       if (GREATER(value, data[g])){ \
+         r = g; \
+         break; \
+       }else{ \
+         l = g + 1; \
+         d *= 2; \
+       } \
+     }else{ \
+       if (GREATER(value, data[m])) \
+         r = m; \
+       else \
+         l = m + 1; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (GREATER(value, data[m])) \
+      r = m; \
+    else \
+      l = m + 1; \
+  } \
+}
+
+
+#define INTEGER64_RSEARCH_DESC_DOWN(data, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = r - d; \
+     m = l + ((r - l) / 2); \
+     if (g>m){ \
+       if (GREATER(data[g], value)){ \
+         l = g + 1; \
+         break; \
+       }else{ \
+         r = g; \
+         d *= 2; \
+       } \
+     }else{ \
+       if (GREATER(data[m], value)) \
+         l = m + 1; \
+       else \
+         r = m; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (GREATER(data[m], value)) \
+      l = m + 1; \
+    else \
+      r = m; \
+  } \
+}
+#define INTEGER64_RSEARCH_DESC_UP(data, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = r - d; \
+     m = l + ((r - l) / 2); \
+     if (g>m){ \
+       if (GREATER(value, data[g])){ \
+         r = g; \
+         d *= 2; \
+       }else{ \
+         l = g + 1; \
+         break; \
+       } \
+     }else{ \
+       if (GREATER(value, data[m])) \
+         r = m; \
+       else \
+         l = m + 1; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (GREATER(value, data[m])) \
+      r = m; \
+    else \
+      l = m + 1; \
+  } \
+}
+
+
+
+
+
+
+#define INTEGER64_BSEARCH_ASC_EQ(data, l, r, value, ret) \
+  INTEGER64_BSEARCH_ASC_DOWN(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret -1; \
+  else if (LESS(data[l], value)) \
+    ret -1; \
+  else \
+    ret l;  \
+
+#define INTEGER64_BSEARCH_ASC_GE(data, l, r, value, ret) \
+  INTEGER64_BSEARCH_ASC_DOWN(data, l, r, value) \
+  if (LESS(data[l], value)) \
+    ret r+1; \
+  else \
+    ret l;   \
+
+#define INTEGER64_BSEARCH_ASC_GT(data, l, r, value, ret) \
+  INTEGER64_BSEARCH_ASC_UP(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret l;   \
+  else \
+    ret r+1; \
+
+
+#define INTEGER64_BSEARCH_ASC_LE(data, l, r, value, ret) \
+  INTEGER64_BSEARCH_ASC_UP(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret l-1; \
+  else \
+    ret r;   \
+
+
+#define INTEGER64_BSEARCH_ASC_LT(data, l, r, value, ret) \
+  INTEGER64_BSEARCH_ASC_DOWN(data, l, r, value) \
+  if (LESS(data[l], value)) \
+    ret r;   \
+  else \
+    ret l-1; \
+
+
+
+#define INTEGER64_BSEARCH_DESC_EQ(data, l, r, value, ret) \
+  INTEGER64_BSEARCH_DESC_DOWN(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret -1; \
+  else if (LESS(data[l], value)) \
+    ret -1; \
+  else \
+    ret l;  \
+
+#define INTEGER64_BSEARCH_DESC_GE(data, l, r, value, ret) \
+  INTEGER64_BSEARCH_DESC_UP(data, l, r, value) \
+ if (LESS(data[l], value)) \
+    ret l-1; \
+  else \
+    ret l;   \
+
+#define INTEGER64_BSEARCH_DESC_GT(data, l, r, value, ret) \
+  INTEGER64_BSEARCH_DESC_DOWN(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret l;   \
+  else \
+    ret l-1; \
+
+
+#define INTEGER64_BSEARCH_DESC_LE(data, l, r, value, ret) \
+  INTEGER64_BSEARCH_DESC_DOWN(data, l, r, value) \
+ if (LESS(value, data[l])) \
+    ret r+1; \
+  else \
+    ret l;   \
+
+
+#define INTEGER64_BSEARCH_DESC_LT(data, l, r, value, ret) \
+  INTEGER64_BSEARCH_DESC_UP(data, l, r, value) \
+  if (LESS(data[l], value)) \
+    ret l;   \
+  else \
+    ret r+1; \
+
+
+
+
+#define INTEGER64_LSEARCH_ASC_EQ(data, l, r, value, ret) \
+  INTEGER64_LSEARCH_ASC_DOWN(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret -1; \
+  else if (LESS(data[l], value)) \
+    ret -1; \
+  else \
+    ret l;  \
+
+#define INTEGER64_LSEARCH_ASC_GE(data, l, r, value, ret) \
+  INTEGER64_LSEARCH_ASC_DOWN(data, l, r, value) \
+  if (LESS(data[l], value)) \
+    ret r+1; \
+  else \
+    ret l;   \
+
+#define INTEGER64_LSEARCH_ASC_GT(data, l, r, value, ret) \
+  INTEGER64_LSEARCH_ASC_UP(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret l;   \
+  else \
+    ret r+1; \
+
+
+#define INTEGER64_LSEARCH_ASC_LE(data, l, r, value, ret) \
+  INTEGER64_LSEARCH_ASC_UP(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret l-1; \
+  else \
+    ret r;   \
+
+
+#define INTEGER64_LSEARCH_ASC_LT(data, l, r, value, ret) \
+  INTEGER64_LSEARCH_ASC_DOWN(data, l, r, value) \
+  if (LESS(data[l], value)) \
+    ret r;   \
+  else \
+    ret l-1; \
+
+
+
+#define INTEGER64_LSEARCH_DESC_EQ(data, l, r, value, ret) \
+  INTEGER64_LSEARCH_DESC_DOWN(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret -1; \
+  else if (LESS(data[l], value)) \
+    ret -1; \
+  else \
+    ret l;  \
+
+#define INTEGER64_LSEARCH_DESC_GE(data, l, r, value, ret) \
+  INTEGER64_LSEARCH_DESC_UP(data, l, r, value) \
+ if (LESS(data[l], value)) \
+    ret l-1; \
+  else \
+    ret l;   \
+
+#define INTEGER64_LSEARCH_DESC_GT(data, l, r, value, ret) \
+  INTEGER64_LSEARCH_DESC_DOWN(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret l;   \
+  else \
+    ret l-1; \
+
+
+#define INTEGER64_LSEARCH_DESC_LE(data, l, r, value, ret) \
+  INTEGER64_LSEARCH_DESC_DOWN(data, l, r, value) \
+ if (LESS(value, data[l])) \
+    ret r+1; \
+  else \
+    ret l;   \
+
+
+#define INTEGER64_LSEARCH_DESC_LT(data, l, r, value, ret) \
+  INTEGER64_LSEARCH_DESC_UP(data, l, r, value) \
+  if (LESS(data[l], value)) \
+    ret l;   \
+  else \
+    ret r+1; \
+
+
+
+
+
+
+#define INTEGER64_RSEARCH_ASC_EQ(data, l, r, value, ret) \
+  INTEGER64_RSEARCH_ASC_DOWN(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret -1; \
+  else if (LESS(data[l], value)) \
+    ret -1; \
+  else \
+    ret l;  \
+
+#define INTEGER64_RSEARCH_ASC_GE(data, l, r, value, ret) \
+  INTEGER64_RSEARCH_ASC_DOWN(data, l, r, value) \
+  if (LESS(data[l], value)) \
+    ret r+1; \
+  else \
+    ret l;   \
+
+#define INTEGER64_RSEARCH_ASC_GT(data, l, r, value, ret) \
+  INTEGER64_RSEARCH_ASC_UP(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret l;   \
+  else \
+    ret r+1; \
+
+
+#define INTEGER64_RSEARCH_ASC_LE(data, l, r, value, ret) \
+  INTEGER64_RSEARCH_ASC_UP(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret l-1; \
+  else \
+    ret r;   \
+
+
+#define INTEGER64_RSEARCH_ASC_LT(data, l, r, value, ret) \
+  INTEGER64_RSEARCH_ASC_DOWN(data, l, r, value) \
+  if (LESS(data[l], value)) \
+    ret r;   \
+  else \
+    ret l-1; \
+
+
+
+#define INTEGER64_RSEARCH_DESC_EQ(data, l, r, value, ret) \
+  INTEGER64_RSEARCH_DESC_DOWN(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret -1; \
+  else if (LESS(data[l], value)) \
+    ret -1; \
+  else \
+    ret l;  \
+
+#define INTEGER64_RSEARCH_DESC_GE(data, l, r, value, ret) \
+  INTEGER64_RSEARCH_DESC_UP(data, l, r, value) \
+ if (LESS(data[l], value)) \
+    ret l-1; \
+  else \
+    ret l;   \
+
+#define INTEGER64_RSEARCH_DESC_GT(data, l, r, value, ret) \
+  INTEGER64_RSEARCH_DESC_DOWN(data, l, r, value) \
+  if (LESS(value, data[l])) \
+    ret l;   \
+  else \
+    ret l-1; \
+
+
+#define INTEGER64_RSEARCH_DESC_LE(data, l, r, value, ret) \
+  INTEGER64_RSEARCH_DESC_DOWN(data, l, r, value) \
+ if (LESS(value, data[l])) \
+    ret r+1; \
+  else \
+    ret l;   \
+
+
+#define INTEGER64_RSEARCH_DESC_LT(data, l, r, value, ret) \
+  INTEGER64_RSEARCH_DESC_UP(data, l, r, value) \
+  if (LESS(data[l], value)) \
+    ret l;   \
+  else \
+    ret r+1; \
+
+
+
+IndexT integer64_bsearch_asc_EQ(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bsearch_asc_GE(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bsearch_asc_GT(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bsearch_asc_LE(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bsearch_asc_LT(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+
+IndexT integer64_bsearch_desc_EQ(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bsearch_desc_GE(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bsearch_desc_GT(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bsearch_desc_LE(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bsearch_desc_LT(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+
+
+IndexT integer64_lsearch_asc_EQ(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_lsearch_asc_GE(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_lsearch_asc_GT(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_lsearch_asc_LE(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_lsearch_asc_LT(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+
+IndexT integer64_lsearch_desc_EQ(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_lsearch_desc_GE(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_lsearch_desc_GT(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_lsearch_desc_LE(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_lsearch_desc_LT(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+
+
+IndexT integer64_rsearch_asc_EQ(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rsearch_asc_GE(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rsearch_asc_GT(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rsearch_asc_LE(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rsearch_asc_LT(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+
+IndexT integer64_rsearch_desc_EQ(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rsearch_desc_GE(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rsearch_desc_GT(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rsearch_desc_LE(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rsearch_desc_LT(ValueT *data, IndexT l, IndexT r, ValueT value);
+
+
+
+
+// "osearch" core macros: like the plain search macros above but the data is
+// accessed through an order/index vector, i.e. data[index[m]] instead of
+// data[m].  Each macro narrows [l,r] to a single position in place.
+// BOSEARCH = plain binary search; DOWN converges on the first position not
+// LESS than value, UP on the first position where value is LESS.
+#define INTEGER64_BOSEARCH_ASC_DOWN(data, index, l, r, value) \
+{ \
+IndexT m; \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (LESS(data[index[m]], value)) \
+      l = m + 1; \
+    else \
+      r = m; \
+  } \
+}
+// Binary search converging from above (first position with value LESS).
+#define INTEGER64_BOSEARCH_ASC_UP(data, index, l, r, value) \
+{ \
+IndexT m; \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (LESS(value, data[index[m]])) \
+      r = m; \
+    else \
+      l = m + 1; \
+  } \
+}
+
+// LOSEARCH = galloping (exponential) probe from the Left edge with doubling
+// step d, falling back to plain binary search once the bracket is found.
+// Faster when the target is near l.
+#define INTEGER64_LOSEARCH_ASC_DOWN(data, index, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = l - 1 + d; \
+     m = l + ((r - l) / 2); \
+     if (g<m){ \
+       if (LESS(data[index[g]], value)){ \
+         l = g + 1; \
+         d *= 2; \
+       }else{ \
+         r = g; \
+         break; \
+       } \
+     }else{ \
+       if (LESS(data[index[m]], value)) \
+         l = m + 1; \
+       else \
+         r = m; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (LESS(data[index[m]], value)) \
+      l = m + 1; \
+    else \
+      r = m; \
+  } \
+}
+// Left-galloping variant converging from above.
+#define INTEGER64_LOSEARCH_ASC_UP(data, index, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = l - 1 + d; \
+     m = l + ((r - l) / 2); \
+     if (g<m){ \
+       if (LESS(value, data[index[g]])){ \
+         r = g; \
+         break; \
+       }else{ \
+         l = g + 1; \
+         d *= 2; \
+       } \
+     }else{ \
+       if (LESS(value, data[index[m]])) \
+         r = m; \
+       else \
+         l = m + 1; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (LESS(value, data[index[m]])) \
+      r = m; \
+    else \
+      l = m + 1; \
+  } \
+}
+
+
+// ROSEARCH = galloping probe from the Right edge (g = r - d); faster when
+// the target is near r.  Same fallback to plain binary search.
+#define INTEGER64_ROSEARCH_ASC_DOWN(data, index, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = r - d; \
+     m = l + ((r - l) / 2); \
+     if (g>m){ \
+       if (LESS(data[index[g]], value)){ \
+         l = g + 1; \
+         break; \
+       }else{ \
+         r = g; \
+         d *= 2; \
+       } \
+     }else{ \
+       if (LESS(data[index[m]], value)) \
+         l = m + 1; \
+       else \
+         r = m; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (LESS(data[index[m]], value)) \
+      l = m + 1; \
+    else \
+      r = m; \
+  } \
+}
+// Right-galloping variant converging from above.
+#define INTEGER64_ROSEARCH_ASC_UP(data, index, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = r - d; \
+     m = l + ((r - l) / 2); \
+     if (g>m){ \
+       if (LESS(value, data[index[g]])){ \
+         r = g; \
+         d *= 2; \
+       }else{ \
+         l = g + 1; \
+         break; \
+       } \
+     }else{ \
+       if (LESS(value, data[index[m]])) \
+         r = m; \
+       else \
+         l = m + 1; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (LESS(value, data[index[m]])) \
+      r = m; \
+    else \
+      l = m + 1; \
+  } \
+}
+
+// desc is a clone of asc with LESS replaced by GREATER
+
+// Descending-order core macros: mechanical clones of the ASC versions above
+// with every LESS(a,b) replaced by GREATER(a,b); see the ASC macros for the
+// meaning of DOWN/UP and the galloping scheme.
+#define INTEGER64_BOSEARCH_DESC_DOWN(data, index, l, r, value) \
+{ \
+IndexT m; \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (GREATER(data[index[m]], value)) \
+      l = m + 1; \
+    else \
+      r = m; \
+  } \
+}
+#define INTEGER64_BOSEARCH_DESC_UP(data, index, l, r, value) \
+{ \
+IndexT m; \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (GREATER(value, data[index[m]])) \
+      r = m; \
+    else \
+      l = m + 1; \
+  } \
+}
+
+// Left-galloping descending variants.
+#define INTEGER64_LOSEARCH_DESC_DOWN(data, index, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = l - 1 + d; \
+     m = l + ((r - l) / 2); \
+     if (g<m){ \
+       if (GREATER(data[index[g]], value)){ \
+         l = g + 1; \
+         d *= 2; \
+       }else{ \
+         r = g; \
+         break; \
+       } \
+     }else{ \
+       if (GREATER(data[index[m]], value)) \
+         l = m + 1; \
+       else \
+         r = m; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (GREATER(data[index[m]], value)) \
+      l = m + 1; \
+    else \
+      r = m; \
+  } \
+}
+#define INTEGER64_LOSEARCH_DESC_UP(data, index, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = l - 1 + d; \
+     m = l + ((r - l) / 2); \
+     if (g<m){ \
+       if (GREATER(value, data[index[g]])){ \
+         r = g; \
+         break; \
+       }else{ \
+         l = g + 1; \
+         d *= 2; \
+       } \
+     }else{ \
+       if (GREATER(value, data[index[m]])) \
+         r = m; \
+       else \
+         l = m + 1; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (GREATER(value, data[index[m]])) \
+      r = m; \
+    else \
+      l = m + 1; \
+  } \
+}
+
+
+// Right-galloping descending variants.
+#define INTEGER64_ROSEARCH_DESC_DOWN(data, index, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = r - d; \
+     m = l + ((r - l) / 2); \
+     if (g>m){ \
+       if (GREATER(data[index[g]], value)){ \
+         l = g + 1; \
+         break; \
+       }else{ \
+         r = g; \
+         d *= 2; \
+       } \
+     }else{ \
+       if (GREATER(data[index[m]], value)) \
+         l = m + 1; \
+       else \
+         r = m; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (GREATER(data[index[m]], value)) \
+      l = m + 1; \
+    else \
+      r = m; \
+  } \
+}
+#define INTEGER64_ROSEARCH_DESC_UP(data, index, l, r, value) \
+{ \
+  IndexT m,g,d=1; \
+  while (l<r){ \
+     g = r - d; \
+     m = l + ((r - l) / 2); \
+     if (g>m){ \
+       if (GREATER(value, data[index[g]])){ \
+         r = g; \
+         d *= 2; \
+       }else{ \
+         l = g + 1; \
+         break; \
+       } \
+     }else{ \
+       if (GREATER(value, data[index[m]])) \
+         r = m; \
+       else \
+         l = m + 1; \
+       break; \
+     } \
+  } \
+  while (l<r){ \
+    m = l + ((r - l) / 2); \
+    if (GREATER(value, data[index[m]])) \
+      r = m; \
+    else \
+      l = m + 1; \
+  } \
+}
+
+
+
+
+
+
+// Result macros for the index-based searches: run the matching core macro,
+// then emit via 'ret'.  Encoding as elsewhere in this header:
+// EQ -> match position or -1; GE/GT -> first qualifying position or r+1
+// (ASC) / l-1 (DESC) when none; LE/LT -> last qualifying position or
+// l-1 (ASC) / r+1 (DESC) when none.  BOSEARCH = binary search.
+#define INTEGER64_BOSEARCH_ASC_EQ(data, index, l, r, value, ret) \
+  INTEGER64_BOSEARCH_ASC_DOWN(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret -1; \
+  else if (LESS(data[index[l]], value)) \
+    ret -1; \
+  else \
+    ret l;  \
+
+#define INTEGER64_BOSEARCH_ASC_GE(data, index, l, r, value, ret) \
+  INTEGER64_BOSEARCH_ASC_DOWN(data, index, l, r, value) \
+  if (LESS(data[index[l]], value)) \
+    ret r+1; \
+  else \
+    ret l;   \
+
+#define INTEGER64_BOSEARCH_ASC_GT(data, index, l, r, value, ret) \
+  INTEGER64_BOSEARCH_ASC_UP(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret l;   \
+  else \
+    ret r+1; \
+
+
+#define INTEGER64_BOSEARCH_ASC_LE(data, index, l, r, value, ret) \
+  INTEGER64_BOSEARCH_ASC_UP(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret l-1; \
+  else \
+    ret r;   \
+
+
+#define INTEGER64_BOSEARCH_ASC_LT(data, index, l, r, value, ret) \
+  INTEGER64_BOSEARCH_ASC_DOWN(data, index, l, r, value) \
+  if (LESS(data[index[l]], value)) \
+    ret r;   \
+  else \
+    ret l-1; \
+
+
+
+#define INTEGER64_BOSEARCH_DESC_EQ(data, index, l, r, value, ret) \
+  INTEGER64_BOSEARCH_DESC_DOWN(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret -1; \
+  else if (LESS(data[index[l]], value)) \
+    ret -1; \
+  else \
+    ret l;  \
+
+#define INTEGER64_BOSEARCH_DESC_GE(data, index, l, r, value, ret) \
+  INTEGER64_BOSEARCH_DESC_UP(data, index, l, r, value) \
+ if (LESS(data[index[l]], value)) \
+    ret l-1; \
+  else \
+    ret l;   \
+
+#define INTEGER64_BOSEARCH_DESC_GT(data, index, l, r, value, ret) \
+  INTEGER64_BOSEARCH_DESC_DOWN(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret l;   \
+  else \
+    ret l-1; \
+
+
+#define INTEGER64_BOSEARCH_DESC_LE(data, index, l, r, value, ret) \
+  INTEGER64_BOSEARCH_DESC_DOWN(data, index, l, r, value) \
+ if (LESS(value, data[index[l]])) \
+    ret r+1; \
+  else \
+    ret l;   \
+
+
+#define INTEGER64_BOSEARCH_DESC_LT(data, index, l, r, value, ret) \
+  INTEGER64_BOSEARCH_DESC_UP(data, index, l, r, value) \
+  if (LESS(data[index[l]], value)) \
+    ret l;   \
+  else \
+    ret r+1; \
+
+
+
+
+// LOSEARCH: same contracts as the BOSEARCH_* macros above, but using the
+// left-galloping cores (targets expected near l).
+#define INTEGER64_LOSEARCH_ASC_EQ(data, index, l, r, value, ret) \
+  INTEGER64_LOSEARCH_ASC_DOWN(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret -1; \
+  else if (LESS(data[index[l]], value)) \
+    ret -1; \
+  else \
+    ret l;  \
+
+#define INTEGER64_LOSEARCH_ASC_GE(data, index, l, r, value, ret) \
+  INTEGER64_LOSEARCH_ASC_DOWN(data, index, l, r, value) \
+  if (LESS(data[index[l]], value)) \
+    ret r+1; \
+  else \
+    ret l;   \
+
+#define INTEGER64_LOSEARCH_ASC_GT(data, index, l, r, value, ret) \
+  INTEGER64_LOSEARCH_ASC_UP(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret l;   \
+  else \
+    ret r+1; \
+
+
+#define INTEGER64_LOSEARCH_ASC_LE(data, index, l, r, value, ret) \
+  INTEGER64_LOSEARCH_ASC_UP(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret l-1; \
+  else \
+    ret r;   \
+
+
+#define INTEGER64_LOSEARCH_ASC_LT(data, index, l, r, value, ret) \
+  INTEGER64_LOSEARCH_ASC_DOWN(data, index, l, r, value) \
+  if (LESS(data[index[l]], value)) \
+    ret r;   \
+  else \
+    ret l-1; \
+
+
+
+#define INTEGER64_LOSEARCH_DESC_EQ(data, index, l, r, value, ret) \
+  INTEGER64_LOSEARCH_DESC_DOWN(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret -1; \
+  else if (LESS(data[index[l]], value)) \
+    ret -1; \
+  else \
+    ret l;  \
+
+#define INTEGER64_LOSEARCH_DESC_GE(data, index, l, r, value, ret) \
+  INTEGER64_LOSEARCH_DESC_UP(data, index, l, r, value) \
+ if (LESS(data[index[l]], value)) \
+    ret l-1; \
+  else \
+    ret l;   \
+
+#define INTEGER64_LOSEARCH_DESC_GT(data, index, l, r, value, ret) \
+  INTEGER64_LOSEARCH_DESC_DOWN(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret l;   \
+  else \
+    ret l-1; \
+
+
+#define INTEGER64_LOSEARCH_DESC_LE(data, index, l, r, value, ret) \
+  INTEGER64_LOSEARCH_DESC_DOWN(data, index, l, r, value) \
+ if (LESS(value, data[index[l]])) \
+    ret r+1; \
+  else \
+    ret l;   \
+
+
+#define INTEGER64_LOSEARCH_DESC_LT(data, index, l, r, value, ret) \
+  INTEGER64_LOSEARCH_DESC_UP(data, index, l, r, value) \
+  if (LESS(data[index[l]], value)) \
+    ret l;   \
+  else \
+    ret r+1; \
+
+
+
+
+
+
+// ROSEARCH: same contracts again, using the right-galloping cores
+// (targets expected near r).
+#define INTEGER64_ROSEARCH_ASC_EQ(data, index, l, r, value, ret) \
+  INTEGER64_ROSEARCH_ASC_DOWN(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret -1; \
+  else if (LESS(data[index[l]], value)) \
+    ret -1; \
+  else \
+    ret l;  \
+
+#define INTEGER64_ROSEARCH_ASC_GE(data, index, l, r, value, ret) \
+  INTEGER64_ROSEARCH_ASC_DOWN(data, index, l, r, value) \
+  if (LESS(data[index[l]], value)) \
+    ret r+1; \
+  else \
+    ret l;   \
+
+#define INTEGER64_ROSEARCH_ASC_GT(data, index, l, r, value, ret) \
+  INTEGER64_ROSEARCH_ASC_UP(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret l;   \
+  else \
+    ret r+1; \
+
+
+#define INTEGER64_ROSEARCH_ASC_LE(data, index, l, r, value, ret) \
+  INTEGER64_ROSEARCH_ASC_UP(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret l-1; \
+  else \
+    ret r;   \
+
+
+#define INTEGER64_ROSEARCH_ASC_LT(data, index, l, r, value, ret) \
+  INTEGER64_ROSEARCH_ASC_DOWN(data, index, l, r, value) \
+  if (LESS(data[index[l]], value)) \
+    ret r;   \
+  else \
+    ret l-1; \
+
+
+
+#define INTEGER64_ROSEARCH_DESC_EQ(data, index, l, r, value, ret) \
+  INTEGER64_ROSEARCH_DESC_DOWN(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret -1; \
+  else if (LESS(data[index[l]], value)) \
+    ret -1; \
+  else \
+    ret l;  \
+
+#define INTEGER64_ROSEARCH_DESC_GE(data, index, l, r, value, ret) \
+  INTEGER64_ROSEARCH_DESC_UP(data, index, l, r, value) \
+ if (LESS(data[index[l]], value)) \
+    ret l-1; \
+  else \
+    ret l;   \
+
+#define INTEGER64_ROSEARCH_DESC_GT(data, index, l, r, value, ret) \
+  INTEGER64_ROSEARCH_DESC_DOWN(data, index, l, r, value) \
+  if (LESS(value, data[index[l]])) \
+    ret l;   \
+  else \
+    ret l-1; \
+
+
+#define INTEGER64_ROSEARCH_DESC_LE(data, index, l, r, value, ret) \
+  INTEGER64_ROSEARCH_DESC_DOWN(data, index, l, r, value) \
+ if (LESS(value, data[index[l]])) \
+    ret r+1; \
+  else \
+    ret l;   \
+
+
+#define INTEGER64_ROSEARCH_DESC_LT(data, index, l, r, value, ret) \
+  INTEGER64_ROSEARCH_DESC_UP(data, index, l, r, value) \
+  if (LESS(data[index[l]], value)) \
+    ret l;   \
+  else \
+    ret r+1; \
+
+
+
+IndexT integer64_bosearch_asc_EQ(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bosearch_asc_GE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bosearch_asc_GT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bosearch_asc_LE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bosearch_asc_LT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+
+IndexT integer64_bosearch_desc_EQ(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bosearch_desc_GE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bosearch_desc_GT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bosearch_desc_LE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_bosearch_desc_LT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+
+
+IndexT integer64_losearch_asc_EQ(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_losearch_asc_GE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_losearch_asc_GT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_losearch_asc_LE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_losearch_asc_LT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+
+IndexT integer64_losearch_desc_EQ(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_losearch_desc_GE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_losearch_desc_GT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_losearch_desc_LE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_losearch_desc_LT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+
+
+IndexT integer64_rosearch_asc_EQ(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rosearch_asc_GE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rosearch_asc_GT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rosearch_asc_LE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rosearch_asc_LT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+
+IndexT integer64_rosearch_desc_EQ(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rosearch_desc_GE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rosearch_desc_GT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rosearch_desc_LE(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
+
+IndexT integer64_rosearch_desc_LT(ValueT *data, IndexT *index, IndexT l, IndexT r, ValueT value);
diff --git a/src/cache.c b/src/cache.c
new file mode 100644
index 0000000..32ad22d
--- /dev/null
+++ b/src/cache.c
@@ -0,0 +1,55 @@
+#include <R.h>
+#include <Rdefines.h>
+
+// .Call: return TRUE iff x_ and y_ are the *same* vector in RAM, i.e.
+// their data pointers are identical (and their lengths match).
+// Errors if x_ is not an atomic vector or the types differ.
+// Fixes vs. previous version:
+//  - LGLSXP and VECSXP cases were missing their 'break' and fell through
+//    into INTSXP / RAWSXP, overwriting 'ret' with the wrong comparison;
+//  - the LGLSXP result was written via INTEGER() instead of LOGICAL().
+SEXP r_ram_truly_identical(
+  SEXP x_
+, SEXP y_
+)
+{
+	SEXP ret_;
+	Rboolean ret;
+	if(!isVectorAtomic(x_)){
+		error("SEXP is not atomic vector");
+		return R_NilValue; // not reached: error() does not return
+	}
+	if (TYPEOF(x_)!=TYPEOF(y_)){
+		error("vectors don't have identic type");
+		return R_NilValue;
+	}
+	//somehow is DATAPTR not declared: ret = DATAPTR(x_)==DATAPTR(y_) ? TRUE : FALSE;
+    switch (TYPEOF(x_)) {
+    case CHARSXP:
+		ret = CHAR(x_)==CHAR(y_) ? TRUE : FALSE;
+	break;
+    case LGLSXP:
+		ret = LOGICAL(x_)==LOGICAL(y_) ? TRUE : FALSE;
+	break;
+    case INTSXP:
+		ret = INTEGER(x_)==INTEGER(y_) ? TRUE : FALSE;
+	break;
+    case REALSXP:
+		ret = REAL(x_)==REAL(y_) ? TRUE : FALSE;
+	break;
+    case CPLXSXP:
+		ret = COMPLEX(x_)==COMPLEX(y_) ? TRUE : FALSE;
+	break;
+    case STRSXP:
+		ret = STRING_PTR(x_)==STRING_PTR(y_) ? TRUE : FALSE;
+	break;
+    case VECSXP:
+		ret = VECTOR_PTR(x_)==VECTOR_PTR(y_) ? TRUE : FALSE;
+	break;
+    case RAWSXP:
+		ret = RAW(x_)==RAW(y_) ? TRUE : FALSE;
+	break;
+    default:
+		error("unimplemented type in truly.identical");
+		return R_NilValue;
+    }
+	// same pointer but different length still counts as not identical
+	if (LENGTH(x_)!=LENGTH(y_)){
+		ret = FALSE;
+	}
+	PROTECT( ret_ = allocVector(LGLSXP, 1) );
+	LOGICAL(ret_)[0] = ret;
+	UNPROTECT(1);
+	return ret_;
+}
+
diff --git a/src/hash64.c b/src/hash64.c
new file mode 100644
index 0000000..ec26e02
--- /dev/null
+++ b/src/hash64.c
@@ -0,0 +1,541 @@
+/*
+# C-Code for hashing and matching
+# S3 atomic 64bit integers for R
+# (c) 2011 Jens Oehlschägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2012-10-22
+#*/
+
+/* for speed (should not really matter in this case as most time is spent in the hashing) */
+// #define USE_RINTERNALS 1
+#include <Rinternals.h>
+#include <R.h>
+
+//#include "timing.h"
+
+// This multiplicator was used in Simon Urbanek's package fastmatch for 32-bit integers
+//#define HASH64(X, SHIFT) (314159265358979323ULL * ((unsigned long long)(X)) >> (SHIFT))
+// This multiplicator seems to work fine with 64bit integers
+#define HASH64(X, SHIFT) (0x9e3779b97f4a7c13ULL * ((unsigned long long)(X)) >> (SHIFT))
+
+// .Call: hash every 64-bit integer in x_ (payload of a REALSXP reinterpreted
+// as long long) into the preallocated integer vector ret_, using HASH64 with
+// shift = 64 - bits so that hash values fit into 'bits' bits.
+// Returns ret_ (filled in place).
+SEXP hashfun_integer64(SEXP x_, SEXP bits_, SEXP ret_){
+  int i, n = LENGTH(x_);
+  long long * x = (long long *) REAL(x_);
+  unsigned int * ret = (unsigned int *) INTEGER(ret_);
+  int shift = 64 - asInteger(bits_);
+  for(i=0; i<n; i++){
+	ret[i] = (unsigned int) HASH64(x[i], shift);
+  }
+  return ret_;
+}
+
+// this function is loosely following Simon Urbanek's package 'fastmatch'
+// Build an open-addressing hash map (linear probing with wrap-around) over
+// x_: hashpos_[h] stores the 1-based position of the first occurrence of a
+// value, 0 marks an empty slot.  The number of distinct values is written
+// into nunique_.  NOTE(review): assumes hashpos_ arrives zero-initialized
+// and LENGTH(hashpos_) == 2^bits — confirm in the R-level caller.
+SEXP hashmap_integer64(SEXP x_, SEXP bits_, SEXP hashpos_, SEXP nunique_){
+  int i, nx = LENGTH(x_);
+  int h, nh = LENGTH(hashpos_);
+  long long * x = (long long *) REAL(x_);
+  unsigned int * hashpos = (unsigned int *) INTEGER(hashpos_);
+  int bits = asInteger(bits_);
+  int shift = 64 - bits;
+  long long v;
+  int nunique = 0;
+  for(i=0; i<nx; ){
+    v = x[i++];
+	h = HASH64(v, shift);
+	while (hashpos[h] && x[hashpos[h] - 1] != v){
+		h++;
+		if (h == nh) 
+			h = 0;
+	  }
+	  if (!hashpos[h]){
+      hashpos[h] = i;
+      nunique++;
+	  }
+  }
+  INTEGER(nunique_)[0] = nunique;
+  return R_NilValue;
+}
+
+// .Call: match()-style lookup.  For each element of x_, probe the hash map
+// built over hashdat_ and write the 1-based position of the matching value
+// into ret_, or 'nomatch' if the probe hits an empty slot.
+SEXP hashpos_integer64(SEXP x_, SEXP hashdat_, SEXP bits_, SEXP hashpos_, SEXP nomatch_, SEXP ret_){
+  int i, nx = LENGTH(x_);
+  int h, nh = LENGTH(hashpos_);
+  long long * x = (long long *) REAL(x_);
+  long long * hashdat = (long long *) REAL(hashdat_);
+  unsigned int * hashpos = (unsigned int *) INTEGER(hashpos_);
+  int * ret = INTEGER(ret_);
+  int bits = asInteger(bits_);
+  int shift = 64 - bits;
+  int nomatch = asInteger(nomatch_);
+  long long v;
+  for(i=0; i<nx; i++){
+    v = x[i];
+	h = HASH64(v, shift);
+    for(;;){
+	  if (hashpos[h]){  // this is mostly while(hashpos[h]) but we want to catch failure for the nomatch assignment
+		  if (hashdat[hashpos[h] - 1] == v){
+			ret[i] = hashpos[h];
+			break;
+		  }
+		  h++;
+		  if (h == nh) 
+			h = 0;
+	  }else{
+	    ret[i] = nomatch;
+		break;
+	  }
+	}
+  }
+  return R_NilValue;
+}
+
+// .Call: reverse match.  The hash map indexes hashdat_ (the table); for each
+// element of x_ record in ret_ (parallel to hashdat_) the 1-based position of
+// its first occurrence in x_.  After the scan, remaining gaps in ret_ are
+// filled: positions of duplicated table values copy the result of their
+// first occurrence, all other gaps get 'nomatch'.  ret_ is assumed
+// zero-initialized — TODO confirm in the R-level wrapper.
+SEXP hashrev_integer64(SEXP x_, SEXP hashdat_, SEXP bits_, SEXP hashpos_, SEXP nunique_, SEXP nomatch_, SEXP ret_){
+  int i, nx = LENGTH(x_);
+  int h, nh = LENGTH(hashpos_);
+  int nd = LENGTH(hashdat_);
+  long long * x = (long long *) REAL(x_);
+  long long * hashdat = (long long *) REAL(hashdat_);
+  unsigned int * hashpos = (unsigned int *) INTEGER(hashpos_);
+  int * ret = INTEGER(ret_);
+  int bits = asInteger(bits_);
+  int shift = 64 - bits;
+  int nomatch = asInteger(nomatch_);
+  int nunique = asInteger(nunique_);
+  int iunique=0;
+  long long v;
+  for(i=0; i<nx; ){
+	v = x[i++];
+	h = HASH64(v, shift);
+	while(hashpos[h]){
+	  if (hashdat[hashpos[h] - 1] == v){
+	    h = hashpos[h] - 1;
+		if (!ret[h]){
+			ret[h] = i;
+			if (++iunique==nunique)
+			  i=nx; // break out of for as well
+		}
+		break;
+	  }
+	  h++;
+	  if (h == nh) 
+		h = 0;
+	}
+  }
+  if (iunique<nd){
+    if (nunique<nd){ // some gaps are duplicates
+	  for(i=0; i<nd; i++){
+	    if (!ret[i]){
+			v = hashdat[i];
+			h = HASH64(v, shift);
+			while(hashpos[h]){  // this is mostly while(hashpos[h]) but we want to catch failure for the nomatch assignment
+			  if (hashdat[hashpos[h] - 1] == v){
+			    h = ret[hashpos[h] - 1];
+				if (h)
+				  ret[i] = h;
+				else
+				  ret[i] = nomatch;
+				break;
+			  }
+			  h++;
+			  if (h == nh) 
+				h = 0;
+			}
+		}
+	  }
+	}else{ // no duplicates: all gaps are nomatches
+	  for(i=0; i<nd; i++)
+	    if (!ret[i])
+		  ret[i] = nomatch;
+	}
+  }
+  return R_NilValue;
+}
+
+// .Call: reverse %in%.  Like hashrev_integer64 but only records membership:
+// ret_[j] = TRUE iff table value hashdat_[j] occurs somewhere in x_.
+// Duplicated table values inherit the flag of their first occurrence.
+// ret_ is assumed FALSE/zero-initialized — TODO confirm in the R wrapper.
+SEXP hashrin_integer64(SEXP x_, SEXP hashdat_, SEXP bits_, SEXP hashpos_, SEXP nunique_, SEXP ret_){
+  int i, nx = LENGTH(x_);
+  int h, nh = LENGTH(hashpos_);
+  int nd = LENGTH(hashdat_);
+  long long * x = (long long *) REAL(x_);
+  long long * hashdat = (long long *) REAL(hashdat_);
+  unsigned int * hashpos = (unsigned int *) INTEGER(hashpos_);
+  int * ret = INTEGER(ret_);
+  int bits = asInteger(bits_);
+  int shift = 64 - bits;
+  int nunique = asInteger(nunique_);
+  int iunique=0;
+  long long v;
+  for(i=0; i<nx; ){
+	v = x[i++];
+	h = HASH64(v, shift);
+	while(hashpos[h]){
+	  if (hashdat[hashpos[h] - 1] == v){
+	    h = hashpos[h] - 1;
+		if (!ret[h]){
+			ret[h] = TRUE;
+			if (++iunique==nunique)
+			  i=nx; // break out of for as well
+		}
+		break;
+	  }
+	  h++;
+	  if (h == nh) 
+		h = 0;
+	}
+  }
+    if (nunique<nd){ // some gaps are duplicates
+	  for(i=0; i<nd; i++){
+	    if (!ret[i]){
+			v = hashdat[i];
+			h = HASH64(v, shift);
+			while(hashpos[h]){  // this is mostly while(hashpos[h]) but we want to catch failure for the nomatch assignment
+			  if (hashdat[hashpos[h] - 1] == v){
+			    h = ret[hashpos[h] - 1];
+				if (h)
+				  ret[i] = TRUE;
+				break;
+			  }
+			  h++;
+			  if (h == nh) 
+				h = 0;
+			}
+		}
+	  }
+	}
+  return R_NilValue;
+}
+
+// .Call: %in%-style membership.  ret_[i] = TRUE iff x_[i] occurs in
+// hashdat_ (probed via the hash map in hashpos_), FALSE otherwise.
+SEXP hashfin_integer64(SEXP x_, SEXP hashdat_, SEXP bits_, SEXP hashpos_, SEXP ret_){
+  int i, nx = LENGTH(x_);
+  int h, nh = LENGTH(hashpos_);
+  long long * x = (long long *) REAL(x_);
+  long long * hashdat = (long long *) REAL(hashdat_);
+  unsigned int * hashpos = (unsigned int *) INTEGER(hashpos_);
+  int * ret = LOGICAL(ret_);
+  int bits = asInteger(bits_);
+  int shift = 64 - bits;
+  long long v;
+  for(i=0; i<nx; i++){
+    v = x[i];
+	h = HASH64(v, shift);
+    for(;;){
+	  if (hashpos[h]){  // this is mostly while(hashpos[h]) but we want to catch failure for the nomatch assignment
+		  if (hashdat[hashpos[h] - 1] == v){
+			ret[i] = TRUE;
+			break;
+		  }
+		  h++;
+		  if (h == nh) 
+			h = 0;
+	  }else{
+	    ret[i] = FALSE;
+		break;
+	  }
+	}
+  }
+  return R_NilValue;
+}
+
+// .Call: duplicated()-style flags.  Initialize ret_ to all TRUE, then clear
+// the flag at each first-occurrence position registered in hashpos_;
+// stops early once all nunique first occurrences have been cleared.
+SEXP hashdup_integer64(SEXP hashdat_, SEXP bits_, SEXP hashpos_, SEXP nunique_, SEXP ret_){
+  int nu = LENGTH(ret_);
+  int h, nh = LENGTH(hashpos_);
+  //long long * hashdat = (long long *) REAL(hashdat_);
+  unsigned int * hashpos = (unsigned int *) INTEGER(hashpos_);
+  int * ret = LOGICAL(ret_);
+  int nunique = asInteger(nunique_);
+  for(h=0; h<nu; h++)
+	ret[h] = TRUE;
+  for(h=0; h<nh; h++)
+    if (hashpos[h]>0){
+	  ret[hashpos[h]-1] = FALSE;
+	  nunique--;
+	  if (nunique<1)
+	    break;
+	}
+  return R_NilValue;
+}
+
+// .Call: unique().  Collect the LENGTH(ret_) distinct values of hashdat_
+// into ret_.  keep_order_ TRUE scans hashdat_ in data order (emitting a
+// value only at its registered first occurrence); FALSE walks the hash
+// table, yielding values in hash-slot order.
+SEXP hashuni_integer64(SEXP hashdat_, SEXP bits_, SEXP hashpos_, SEXP keep_order_, SEXP ret_){
+  int h, nh = LENGTH(hashpos_);
+  int u, nu = LENGTH(ret_);
+  long long * hashdat = (long long *) REAL(hashdat_);
+  unsigned int * hashpos = (unsigned int *) INTEGER(hashpos_);
+  long long * ret = (long long *) REAL(ret_);
+  if (asLogical(keep_order_)){
+      int i;
+	  // int nx = LENGTH(hashdat_);
+	  int bits = asInteger(bits_);
+	  int shift = 64 - bits;
+	  long long v;
+	  for(u=0,i=0; u<nu; i++){
+		v = hashdat[i];
+		h = HASH64(v, shift);
+		while(hashpos[h] && hashdat[hashpos[h] - 1] != v){  // this is mostly while(hashpos[h]) but we want to catch failure for the nomatch assignment
+		  h++;
+		  if (h == nh) 
+			h = 0;
+		}
+		if (i == (hashpos[h] - 1)){
+		  ret[u++] = v; /* unique */
+		}
+	  }
+  }else{
+	  for(u=0,h=0; u<nu; h++)
+		if (hashpos[h]>0){
+		  ret[u++] = hashdat[hashpos[h]-1];
+		}
+  }
+  return R_NilValue;
+}
+
+// .Call: build the hash map over x_ and collect its unique values in a
+// single pass (in order of first appearance).  The result is allocated at
+// full length nx and truncated to nu via lengthgets(); the count is also
+// written into nunique_.  PROTECT_WITH_INDEX/REPROTECT guard the vector
+// across the reallocation.
+SEXP hashmapuni_integer64(SEXP x_, SEXP bits_, SEXP hashpos_, SEXP nunique_){
+  int i, nx = LENGTH(x_);
+  int h, nh = LENGTH(hashpos_);
+  int nu = 0;
+  SEXP ret_;
+  PROTECT_INDEX idx;
+  PROTECT_WITH_INDEX(ret_ = allocVector(REALSXP, nx), &idx);
+  long long * ret = (long long *) REAL(ret_);
+  long long * x = (long long *) REAL(x_);
+  unsigned int * hashpos = (unsigned int *) INTEGER(hashpos_);
+  int bits = asInteger(bits_);
+  int shift = 64 - bits;
+  long long v;
+  for(i=0; i<nx; ){
+	v = x[i++];
+	h = HASH64(v, shift);
+	while(hashpos[h] && x[hashpos[h] - 1] != v){
+		h++;
+		if (h == nh) 
+			h = 0;
+	}
+	if (!hashpos[h]){
+		hashpos[h] = i;
+		ret[nu++] = v;
+	}
+  }
+  INTEGER(nunique_)[0] = nu;
+  REPROTECT(ret_ = lengthgets(ret_, nu), idx);
+  UNPROTECT(1);
+  return ret_;
+}
+
+
+// .Call: like hashuni_integer64 but returns the 1-based *positions* of the
+// unique values (first occurrences) instead of the values themselves.
+SEXP hashupo_integer64(SEXP hashdat_, SEXP bits_, SEXP hashpos_, SEXP keep_order_, SEXP ret_){
+  int h, nh = LENGTH(hashpos_);
+  int u, nu = LENGTH(ret_);
+  long long * hashdat = (long long *) REAL(hashdat_);
+  unsigned int * hashpos = (unsigned int *) INTEGER(hashpos_);
+  int * ret = INTEGER(ret_);
+  if (asLogical(keep_order_)){
+      int i;
+	  // int nx = LENGTH(hashdat_);
+	  int bits = asInteger(bits_);
+	  int shift = 64 - bits;
+	  long long v;
+	  for(u=0,i=0; u<nu; i++){
+		v = hashdat[i];
+		h = HASH64(v, shift);
+		while(hashpos[h] && hashdat[hashpos[h] - 1] != v){  // this is mostly while(hashpos[h]) but we want to catch failure for the nomatch assignment
+		  h++;
+		  if (h == nh) 
+			h = 0;
+		}
+		if (i == (hashpos[h] - 1)){
+		  ret[u++] = hashpos[h]; /* unique */
+		}
+	  }
+  }else{
+	  for(u=0,h=0; u<nu; h++)
+		if (hashpos[h]>0){
+		  ret[u++] = hashpos[h];
+		}
+  }
+  return R_NilValue;
+}
+
+// .Call: build the hash map over x_ and collect the 1-based positions of
+// first occurrences in one pass (map-building analogue of hashupo).
+// Result allocated at nx and truncated to nu; count written to nunique_.
+SEXP hashmapupo_integer64(SEXP x_, SEXP bits_, SEXP hashpos_, SEXP nunique_){
+  int i, nx = LENGTH(x_);
+  int h, nh = LENGTH(hashpos_);
+  int nu = 0;
+  SEXP ret_;
+  PROTECT_INDEX idx;
+  PROTECT_WITH_INDEX(ret_ = allocVector(INTSXP, nx), &idx);
+  int * ret = INTEGER(ret_);
+  long long * x = (long long *) REAL(x_);
+  unsigned int * hashpos = (unsigned int *) INTEGER(hashpos_);
+  int bits = asInteger(bits_);
+  int shift = 64 - bits;
+  long long v;
+  for(i=0; i<nx; ){
+	v = x[i++];
+	h = HASH64(v, shift);
+	while(hashpos[h] && x[hashpos[h] - 1] != v){
+		h++;
+		if (h == nh) 
+			h = 0;
+	}
+	if (!hashpos[h]){
+		hashpos[h] = i;
+		ret[nu++] = hashpos[h]; // just-assigned value, i.e. position i
+	}
+  }
+  INTEGER(nunique_)[0] = nu;
+  REPROTECT(ret_ = lengthgets(ret_, nu), idx);
+  UNPROTECT(1);
+  return ret_;
+}
+
+
+
+// .Call: tabulate() variant — count occurrences per unique value of
+// hashdat_ using an existing hash map, compacting counts into the leading
+// slots and truncating to the number of occupied slots.
+// NOTE(review): looks like a superseded sibling of hashtab_integer64
+// (trailing "1" in the name, nunique_ parameter unused) — confirm whether
+// it is still referenced before relying on it.
+SEXP hashtab_integer641(SEXP hashdat_, SEXP bits_, SEXP hashpos_, SEXP nunique_){
+  int i, nx = LENGTH(hashdat_);
+  int h, nh = LENGTH(hashpos_);
+  int u;
+  long long * hashdat = (long long *) REAL(hashdat_);
+  unsigned int * hashpos = (unsigned int *) INTEGER(hashpos_);
+  //int * pos = INTEGER(pos_);
+  SEXP ret_;
+  PROTECT_INDEX idx;
+  PROTECT_WITH_INDEX(ret_ = allocVector(INTSXP, nh), &idx);
+  int * ret = INTEGER(ret_);
+  int bits = asInteger(bits_);
+  int shift = 64 - bits;
+  long long v;
+  for(i=0; i<nh; i++)
+	ret[i]=0;
+  for(i=0; i<nx; i++){
+	v = hashdat[i];
+	h = HASH64(v, shift);
+	while(hashpos[h]){  // this is mostly while(hashpos[h]) but we want to catch failure for the nomatch assignment
+	  if (hashdat[hashpos[h] - 1] == v){
+	    ret[h]++;
+		break;
+	  }
+	  h++;
+	  if (h == nh) 
+		h = 0;
+	}
+  }
+  for (u=0,h=0;h<nh;h++){
+    if (hashpos[h]){
+	  //pos[u]=hashpos[h];
+	  ret[u++]=ret[h];
+	}
+  }
+  REPROTECT(ret_ = lengthgets(ret_, u), idx);
+  UNPROTECT(1);
+  return ret_;
+}
+
+
+
+// .Call: table().  Using an existing hash map over x_ (nunique_ already
+// known), count occurrences per hash slot, then compact into a 2-element
+// list: [[1]] the distinct values (REALSXP classed "integer64"),
+// [[2]] their counts (INTSXP), both of length nunique.
+SEXP hashtab_integer64(SEXP x_, SEXP bits_, SEXP hashpos_, SEXP nunique_){
+  int i, nx = LENGTH(x_);
+  int h, nh = LENGTH(hashpos_);
+  long long * x = (long long *) REAL(x_);
+  unsigned int * hashpos = (unsigned int *) INTEGER(hashpos_);
+  SEXP hashtab_;
+  PROTECT_INDEX idx;
+  PROTECT_WITH_INDEX(hashtab_ = allocVector(INTSXP, nh), &idx);
+  int *hashtab = INTEGER(hashtab_);
+  int bits = asInteger(bits_);
+  int shift = 64 - bits;
+  long long v;
+  int u, nu = INTEGER(nunique_)[0];
+
+  for(i=0; i<nh; i++)
+	hashtab[i]=0;
+  // per-slot occurrence counts: probe to the slot owning v, then increment
+  for(i=0; i<nx; ){
+    v = x[i++];
+	h = HASH64(v, shift);
+	while (hashpos[h] && x[hashpos[h] - 1] != v){
+		h++;
+		if (h == nh) 
+			h = 0;
+	}
+	hashtab[h]++;
+  }
+  SEXP tabval_;
+  PROTECT(tabval_ = allocVector(REALSXP, nu));
+  long long * tabval = (long long *) REAL(tabval_);
+  // compact occupied slots into the leading nu positions
+  for (u=0,h=0;u<nu;h++){
+    if (hashpos[h]){
+	  tabval[u] = x[hashpos[h]-1];
+	  hashtab[u]=hashtab[h];
+	  u++;
+	}
+  }
+  REPROTECT(hashtab_ = lengthgets(hashtab_, nu), idx);
+  
+  SEXP class;
+  PROTECT(class = allocVector(STRSXP, 1));
+  SET_STRING_ELT(class, 0, mkChar("integer64"));
+  classgets(tabval_, class);
+  
+  SEXP ret_;
+  PROTECT(ret_ = allocVector(VECSXP, 2));
+  SET_VECTOR_ELT(ret_, 0, tabval_);
+  SET_VECTOR_ELT(ret_, 1, hashtab_);
+  
+  UNPROTECT(4);
+  return ret_;
+}
+
+
+
+// .Call: build the hash map over x_ AND tabulate in a single pass
+// (map-building analogue of hashtab_integer64).  Returns list(values,
+// counts) with values classed "integer64"; the unique count is also
+// written into nunique_.
+SEXP hashmaptab_integer64(SEXP x_, SEXP bits_, SEXP hashpos_, SEXP nunique_){
+  int i, nx = LENGTH(x_);
+  int h, nh = LENGTH(hashpos_);
+  long long * x = (long long *) REAL(x_);
+  unsigned int * hashpos = (unsigned int *) INTEGER(hashpos_);
+  SEXP hashtab_;
+  PROTECT_INDEX idx;
+  PROTECT_WITH_INDEX(hashtab_ = allocVector(INTSXP, nh), &idx);
+  int *hashtab = INTEGER(hashtab_);
+  int bits = asInteger(bits_);
+  int shift = 64 - bits;
+  long long v;
+  int u, nu=0;
+  for(i=0; i<nh; i++)
+	hashtab[i]=0;
+  // register first occurrences and count per slot in the same probe
+  for(i=0; i<nx; ){
+    v = x[i++];
+	h = HASH64(v, shift);
+	while (hashpos[h] && x[hashpos[h] - 1] != v){
+		h++;
+		if (h == nh) 
+			h = 0;
+	}
+	if (!hashpos[h]){
+		hashpos[h] = i;
+		nu++;
+	}
+	hashtab[h]++;
+  }
+  SEXP tabval_;
+  PROTECT(tabval_ = allocVector(REALSXP, nu));
+  long long * tabval = (long long *) REAL(tabval_);
+  // compact occupied slots into the leading nu positions
+  for (u=0,h=0;u<nu;h++){
+    if (hashpos[h]){
+	  tabval[u] = x[hashpos[h]-1];
+	  hashtab[u]=hashtab[h];
+	  u++;
+	}
+  }
+  INTEGER(nunique_)[0] = nu;
+  REPROTECT(hashtab_ = lengthgets(hashtab_, nu), idx);
+  
+  SEXP class;
+  PROTECT(class = allocVector(STRSXP, 1));
+  SET_STRING_ELT(class, 0, mkChar("integer64"));
+  classgets(tabval_, class);
+  
+  SEXP ret_;
+  PROTECT(ret_ = allocVector(VECSXP, 2));
+  SET_VECTOR_ELT(ret_, 0, tabval_);
+  SET_VECTOR_ELT(ret_, 1, hashtab_);
+  
+  UNPROTECT(4);
+  return ret_;
+}
diff --git a/src/init.c b/src/init.c
new file mode 100644
index 0000000..fab4cfc
--- /dev/null
+++ b/src/init.c
@@ -0,0 +1,219 @@
+#include <R.h>
+#include <Rinternals.h>
+#include <stdlib.h> // for NULL
+#include <R_ext/Rdynload.h>
+
+/* .Call calls */
+extern SEXP abs_integer64(SEXP, SEXP);
+extern SEXP all_integer64(SEXP, SEXP, SEXP);
+extern SEXP any_integer64(SEXP, SEXP, SEXP);
+extern SEXP as_bitstring_integer64(SEXP, SEXP);
+extern SEXP as_character_integer64(SEXP, SEXP);
+extern SEXP as_double_integer64(SEXP, SEXP);
+extern SEXP as_integer64_character(SEXP, SEXP);
+extern SEXP as_integer64_double(SEXP, SEXP);
+extern SEXP as_integer64_integer(SEXP, SEXP);
+extern SEXP as_integer_integer64(SEXP, SEXP);
+extern SEXP as_logical_integer64(SEXP, SEXP);
+extern SEXP cummax_integer64(SEXP, SEXP);
+extern SEXP cummin_integer64(SEXP, SEXP);
+extern SEXP cumprod_integer64(SEXP, SEXP);
+extern SEXP cumsum_integer64(SEXP, SEXP);
+extern SEXP diff_integer64(SEXP, SEXP, SEXP, SEXP);
+extern SEXP divide_integer64_double(SEXP, SEXP, SEXP);
+extern SEXP divide_integer64_integer64(SEXP, SEXP, SEXP);
+extern SEXP EQ_integer64(SEXP, SEXP, SEXP);
+extern SEXP GE_integer64(SEXP, SEXP, SEXP);
+extern SEXP GT_integer64(SEXP, SEXP, SEXP);
+extern SEXP hashdup_integer64(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP hashfin_integer64(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP hashfun_integer64(SEXP, SEXP, SEXP);
+extern SEXP hashmap_integer64(SEXP, SEXP, SEXP, SEXP);
+extern SEXP hashmaptab_integer64(SEXP, SEXP, SEXP, SEXP);
+extern SEXP hashmapuni_integer64(SEXP, SEXP, SEXP, SEXP);
+extern SEXP hashmapupo_integer64(SEXP, SEXP, SEXP, SEXP);
+extern SEXP hashpos_integer64(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP hashrev_integer64(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP hashrin_integer64(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP hashtab_integer64(SEXP, SEXP, SEXP, SEXP);
+extern SEXP hashuni_integer64(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP hashupo_integer64(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP intdiv_integer64(SEXP, SEXP, SEXP);
+extern SEXP isna_integer64(SEXP, SEXP);
+extern SEXP LE_integer64(SEXP, SEXP, SEXP);
+extern SEXP lim_integer64(SEXP);
+extern SEXP log10_integer64(SEXP, SEXP);
+extern SEXP log2_integer64(SEXP, SEXP);
+extern SEXP logbase_integer64(SEXP, SEXP, SEXP);
+extern SEXP log_integer64(SEXP, SEXP);
+extern SEXP logvect_integer64(SEXP, SEXP, SEXP);
+extern SEXP LT_integer64(SEXP, SEXP, SEXP);
+extern SEXP max_integer64(SEXP, SEXP, SEXP);
+extern SEXP mean_integer64(SEXP, SEXP, SEXP);
+extern SEXP min_integer64(SEXP, SEXP, SEXP);
+extern SEXP minus_integer64(SEXP, SEXP, SEXP);
+extern SEXP mod_integer64(SEXP, SEXP, SEXP);
+extern SEXP NE_integer64(SEXP, SEXP, SEXP);
+extern SEXP plus_integer64(SEXP, SEXP, SEXP);
+extern SEXP power_integer64_double(SEXP, SEXP, SEXP);
+extern SEXP power_integer64_integer64(SEXP, SEXP, SEXP);
+extern SEXP prod_integer64(SEXP, SEXP, SEXP);
+extern SEXP range_integer64(SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_issorted_asc(SEXP);
+extern SEXP r_ram_integer64_mergeorder(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_mergesort(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_mergesortorder(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_nacount(SEXP);
+extern SEXP r_ram_integer64_orderdup_asc(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_orderfin_asc(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_orderkey_asc(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_ordernut(SEXP, SEXP);
+extern SEXP r_ram_integer64_orderord(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_orderpos_asc(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_orderrnk_asc(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_ordertab_asc(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_ordertie_asc(SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_orderuni_asc(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_orderupo_asc(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_quickorder(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_quicksort(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_quicksortorder(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_radixorder(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_radixsort(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_radixsortorder(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_shellorder(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_shellsort(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_shellsortorder(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_sortfin_asc(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_sortnut(SEXP);
+extern SEXP r_ram_integer64_sortorderdup_asc(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_sortorderkey_asc(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_sortorderord(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_sortorderpos_asc(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_sortorderrnk_asc(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_sortordertab_asc(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_sortordertie_asc(SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_sortorderuni_asc(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_sortorderupo_asc(SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_sortsrt(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP r_ram_integer64_sorttab_asc(SEXP, SEXP);
+extern SEXP r_ram_integer64_sortuni_asc(SEXP, SEXP);
+extern SEXP r_ram_truly_identical(SEXP, SEXP);
+extern SEXP seq_integer64(SEXP, SEXP, SEXP);
+extern SEXP sign_integer64(SEXP, SEXP);
+extern SEXP sqrt_integer64(SEXP, SEXP);
+extern SEXP sum_integer64(SEXP, SEXP, SEXP);
+extern SEXP times_integer64_double(SEXP, SEXP, SEXP);
+extern SEXP times_integer64_integer64(SEXP, SEXP, SEXP);
+
+// Registration table for all .Call entry points: {R-visible name, C symbol,
+// argument count}.  Must stay in sync with the extern declarations above and
+// with the R-side .Call() invocations; NULL-terminated as required by
+// R_registerRoutines.
+static const R_CallMethodDef CallEntries[] = {
+    {"abs_integer64",                    (DL_FUNC) &abs_integer64,                    2},
+    {"all_integer64",                    (DL_FUNC) &all_integer64,                    3},
+    {"any_integer64",                    (DL_FUNC) &any_integer64,                    3},
+    {"as_bitstring_integer64",           (DL_FUNC) &as_bitstring_integer64,           2},
+    {"as_character_integer64",           (DL_FUNC) &as_character_integer64,           2},
+    {"as_double_integer64",              (DL_FUNC) &as_double_integer64,              2},
+    {"as_integer64_character",           (DL_FUNC) &as_integer64_character,           2},
+    {"as_integer64_double",              (DL_FUNC) &as_integer64_double,              2},
+    {"as_integer64_integer",             (DL_FUNC) &as_integer64_integer,             2},
+    {"as_integer_integer64",             (DL_FUNC) &as_integer_integer64,             2},
+    {"as_logical_integer64",             (DL_FUNC) &as_logical_integer64,             2},
+    {"cummax_integer64",                 (DL_FUNC) &cummax_integer64,                 2},
+    {"cummin_integer64",                 (DL_FUNC) &cummin_integer64,                 2},
+    {"cumprod_integer64",                (DL_FUNC) &cumprod_integer64,                2},
+    {"cumsum_integer64",                 (DL_FUNC) &cumsum_integer64,                 2},
+    {"diff_integer64",                   (DL_FUNC) &diff_integer64,                   4},
+    {"divide_integer64_double",          (DL_FUNC) &divide_integer64_double,          3},
+    {"divide_integer64_integer64",       (DL_FUNC) &divide_integer64_integer64,       3},
+    {"EQ_integer64",                     (DL_FUNC) &EQ_integer64,                     3},
+    {"GE_integer64",                     (DL_FUNC) &GE_integer64,                     3},
+    {"GT_integer64",                     (DL_FUNC) &GT_integer64,                     3},
+    {"hashdup_integer64",                (DL_FUNC) &hashdup_integer64,                5},
+    {"hashfin_integer64",                (DL_FUNC) &hashfin_integer64,                5},
+    {"hashfun_integer64",                (DL_FUNC) &hashfun_integer64,                3},
+    {"hashmap_integer64",                (DL_FUNC) &hashmap_integer64,                4},
+    {"hashmaptab_integer64",             (DL_FUNC) &hashmaptab_integer64,             4},
+    {"hashmapuni_integer64",             (DL_FUNC) &hashmapuni_integer64,             4},
+    {"hashmapupo_integer64",             (DL_FUNC) &hashmapupo_integer64,             4},
+    {"hashpos_integer64",                (DL_FUNC) &hashpos_integer64,                6},
+    {"hashrev_integer64",                (DL_FUNC) &hashrev_integer64,                7},
+    {"hashrin_integer64",                (DL_FUNC) &hashrin_integer64,                6},
+    {"hashtab_integer64",                (DL_FUNC) &hashtab_integer64,                4},
+    {"hashuni_integer64",                (DL_FUNC) &hashuni_integer64,                5},
+    {"hashupo_integer64",                (DL_FUNC) &hashupo_integer64,                5},
+    {"intdiv_integer64",                 (DL_FUNC) &intdiv_integer64,                 3},
+    {"isna_integer64",                   (DL_FUNC) &isna_integer64,                   2},
+    {"LE_integer64",                     (DL_FUNC) &LE_integer64,                     3},
+    {"lim_integer64",                    (DL_FUNC) &lim_integer64,                    1},
+    {"log10_integer64",                  (DL_FUNC) &log10_integer64,                  2},
+    {"log2_integer64",                   (DL_FUNC) &log2_integer64,                   2},
+    {"logbase_integer64",                (DL_FUNC) &logbase_integer64,                3},
+    {"log_integer64",                    (DL_FUNC) &log_integer64,                    2},
+    {"logvect_integer64",                (DL_FUNC) &logvect_integer64,                3},
+    {"LT_integer64",                     (DL_FUNC) &LT_integer64,                     3},
+    {"max_integer64",                    (DL_FUNC) &max_integer64,                    3},
+    {"mean_integer64",                   (DL_FUNC) &mean_integer64,                   3},
+    {"min_integer64",                    (DL_FUNC) &min_integer64,                    3},
+    {"minus_integer64",                  (DL_FUNC) &minus_integer64,                  3},
+    {"mod_integer64",                    (DL_FUNC) &mod_integer64,                    3},
+    {"NE_integer64",                     (DL_FUNC) &NE_integer64,                     3},
+    {"plus_integer64",                   (DL_FUNC) &plus_integer64,                   3},
+    {"power_integer64_double",           (DL_FUNC) &power_integer64_double,           3},
+    {"power_integer64_integer64",        (DL_FUNC) &power_integer64_integer64,        3},
+    {"prod_integer64",                   (DL_FUNC) &prod_integer64,                   3},
+    {"range_integer64",                  (DL_FUNC) &range_integer64,                  3},
+    {"r_ram_integer64_issorted_asc",     (DL_FUNC) &r_ram_integer64_issorted_asc,     1},
+    {"r_ram_integer64_mergeorder",       (DL_FUNC) &r_ram_integer64_mergeorder,       5},
+    {"r_ram_integer64_mergesort",        (DL_FUNC) &r_ram_integer64_mergesort,        4},
+    {"r_ram_integer64_mergesortorder",   (DL_FUNC) &r_ram_integer64_mergesortorder,   5},
+    {"r_ram_integer64_nacount",          (DL_FUNC) &r_ram_integer64_nacount,          1},
+    {"r_ram_integer64_orderdup_asc",     (DL_FUNC) &r_ram_integer64_orderdup_asc,     4},
+    {"r_ram_integer64_orderfin_asc",     (DL_FUNC) &r_ram_integer64_orderfin_asc,     5},
+    {"r_ram_integer64_orderkey_asc",     (DL_FUNC) &r_ram_integer64_orderkey_asc,     4},
+    {"r_ram_integer64_ordernut",         (DL_FUNC) &r_ram_integer64_ordernut,         2},
+    {"r_ram_integer64_orderord",         (DL_FUNC) &r_ram_integer64_orderord,         6},
+    {"r_ram_integer64_orderpos_asc",     (DL_FUNC) &r_ram_integer64_orderpos_asc,     6},
+    {"r_ram_integer64_orderrnk_asc",     (DL_FUNC) &r_ram_integer64_orderrnk_asc,     4},
+    {"r_ram_integer64_ordertab_asc",     (DL_FUNC) &r_ram_integer64_ordertab_asc,     5},
+    {"r_ram_integer64_ordertie_asc",     (DL_FUNC) &r_ram_integer64_ordertie_asc,     3},
+    {"r_ram_integer64_orderuni_asc",     (DL_FUNC) &r_ram_integer64_orderuni_asc,     4},
+    {"r_ram_integer64_orderupo_asc",     (DL_FUNC) &r_ram_integer64_orderupo_asc,     4},
+    {"r_ram_integer64_quickorder",       (DL_FUNC) &r_ram_integer64_quickorder,       6},
+    {"r_ram_integer64_quicksort",        (DL_FUNC) &r_ram_integer64_quicksort,        5},
+    {"r_ram_integer64_quicksortorder",   (DL_FUNC) &r_ram_integer64_quicksortorder,   6},
+    {"r_ram_integer64_radixorder",       (DL_FUNC) &r_ram_integer64_radixorder,       6},
+    {"r_ram_integer64_radixsort",        (DL_FUNC) &r_ram_integer64_radixsort,        5},
+    {"r_ram_integer64_radixsortorder",   (DL_FUNC) &r_ram_integer64_radixsortorder,   6},
+    {"r_ram_integer64_shellorder",       (DL_FUNC) &r_ram_integer64_shellorder,       5},
+    {"r_ram_integer64_shellsort",        (DL_FUNC) &r_ram_integer64_shellsort,        4},
+    {"r_ram_integer64_shellsortorder",   (DL_FUNC) &r_ram_integer64_shellsortorder,   5},
+    {"r_ram_integer64_sortfin_asc",      (DL_FUNC) &r_ram_integer64_sortfin_asc,      4},
+    {"r_ram_integer64_sortnut",          (DL_FUNC) &r_ram_integer64_sortnut,          1},
+    {"r_ram_integer64_sortorderdup_asc", (DL_FUNC) &r_ram_integer64_sortorderdup_asc, 4},
+    {"r_ram_integer64_sortorderkey_asc", (DL_FUNC) &r_ram_integer64_sortorderkey_asc, 4},
+    {"r_ram_integer64_sortorderord",     (DL_FUNC) &r_ram_integer64_sortorderord,     6},
+    {"r_ram_integer64_sortorderpos_asc", (DL_FUNC) &r_ram_integer64_sortorderpos_asc, 6},
+    {"r_ram_integer64_sortorderrnk_asc", (DL_FUNC) &r_ram_integer64_sortorderrnk_asc, 4},
+    {"r_ram_integer64_sortordertab_asc", (DL_FUNC) &r_ram_integer64_sortordertab_asc, 4},
+    {"r_ram_integer64_sortordertie_asc", (DL_FUNC) &r_ram_integer64_sortordertie_asc, 3},
+    {"r_ram_integer64_sortorderuni_asc", (DL_FUNC) &r_ram_integer64_sortorderuni_asc, 4},
+    {"r_ram_integer64_sortorderupo_asc", (DL_FUNC) &r_ram_integer64_sortorderupo_asc, 4},
+    {"r_ram_integer64_sortsrt",          (DL_FUNC) &r_ram_integer64_sortsrt,          5},
+    {"r_ram_integer64_sorttab_asc",      (DL_FUNC) &r_ram_integer64_sorttab_asc,      2},
+    {"r_ram_integer64_sortuni_asc",      (DL_FUNC) &r_ram_integer64_sortuni_asc,      2},
+    {"r_ram_truly_identical",            (DL_FUNC) &r_ram_truly_identical,            2},
+    {"seq_integer64",                    (DL_FUNC) &seq_integer64,                    3},
+    {"sign_integer64",                   (DL_FUNC) &sign_integer64,                   2},
+    {"sqrt_integer64",                   (DL_FUNC) &sqrt_integer64,                   2},
+    {"sum_integer64",                    (DL_FUNC) &sum_integer64,                    3},
+    {"times_integer64_double",           (DL_FUNC) &times_integer64_double,           3},
+    {"times_integer64_integer64",        (DL_FUNC) &times_integer64_integer64,        3},
+    {NULL, NULL, 0}
+};
+
+// DLL initialization hook, called by R when the shared object is loaded.
+// Registers the .Call routines above, disables dynamic symbol lookup and
+// forces callers to use registered symbols only (R >= 3.0 best practice).
+void R_init_bit64(DllInfo *dll)
+{
+    R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
+    R_useDynamicSymbols(dll, FALSE);
+    R_forceSymbols(dll, TRUE);
+}
diff --git a/src/integer64.c b/src/integer64.c
new file mode 100644
index 0000000..37f9fa4
--- /dev/null
+++ b/src/integer64.c
@@ -0,0 +1,958 @@
+/*
+# C-Code
+# S3 atomic 64bit integers for R
+# (c) 2011 Jens Oehlschägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+#*/
+
+#define _INTEGER64_C_SRC
+
+
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                            MODULES USED                                 **/
+/**                                                                         **/
+/*****************************************************************************/
+
+// this define before stdio.h removes the warnings
+// warning: unknown conversion type character 'l' in format [-Wformat]
+// warning: too many arguments for format [-Wformat-extra-args]
+#define __USE_MINGW_ANSI_STDIO 1
+
+#include "ctype.h"
+#include "stdio.h"
+#include <stdint.h>
+
+#include <R.h>
+#include <Rdefines.h>
+#include <R_ext/Error.h>
+#include <Rinternals.h>
+
+# include "integer64.h"
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                      DEFINITIONS AND MACROS                             **/
+/**                                                                         **/
+/*****************************************************************************/
+
+// Recycling iterator for binary ops: i runs 0..n-1 while i1 and i2 wrap
+// around at n1 and n2 (R's usual shorter-operand recycling).  Requires i,
+// i1, i2, n to be in scope at the expansion site.
+// NOTE(review): `i1 = (++i1 == n1) ? 0 : i1` modifies i1 twice without an
+// intervening sequence point -- formally undefined behavior (same idiom as
+// R's own arithmetic.c); works on common compilers but worth cleaning up.
+#define mod_iterate(n1,n2,i1,i2) for (i=i1=i2=0; i<n; i1 = (++i1 == n1) ? 0 : i1, i2 = (++i2 == n2) ? 0 : i2,++i)
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                      TYPEDEFS AND STRUCTURES                            **/
+/**                                                                         **/
+/*****************************************************************************/
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                   PROTOTYPYPES OF LOCAL FUNCTIONS                       **/
+/**                                                                         **/
+/*****************************************************************************/
+
+// static
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                        EXPORTED VARIABLES                               **/
+/**                                                                         **/
+/*****************************************************************************/
+
+// no static no extern
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                          GLOBAL VARIABLES                               **/
+/**                                                                         **/
+/*****************************************************************************/
+
+// static
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                        EXPORTED FUNCTIONS                               **/
+/**                                                                         **/
+/*****************************************************************************/
+
+// no extern
+
+// Coerce double -> integer64 (stored in the doubles of ret_).
+// NaN/NA map to NA_INTEGER64; values outside [MIN_INTEGER64, MAX_INTEGER64]
+// become NA_INTEGER64 with a single overflow warning.  ret_ is preallocated
+// by the R caller and returned.
+SEXP as_integer64_double(SEXP x_, SEXP ret_){
+  long long i, n = LENGTH(x_);
+  long long * ret = (long long *) REAL(ret_);
+  double * x = REAL(x_);
+  double imin = (double) MIN_INTEGER64;
+  double imax = (double) MAX_INTEGER64;
+  Rboolean naflag = FALSE;
+  for (i=0; i<n; i++){
+    if (ISNAN(x[i])) 
+	  ret[i] = NA_INTEGER64;
+	else{
+	  if (x[i]<imin || x[i]>imax){
+	    ret[i] = NA_INTEGER64;
+		naflag = TRUE;
+	  }else
+        ret[i] = (long long) x[i];
+	}
+  }
+  if (naflag)warning(INTEGER64_OVERFLOW_WARNING);
+  return ret_;
+}
+
+// Coerce 32-bit integer -> integer64; NA_INTEGER maps to NA_INTEGER64.
+// Always lossless, so no warning path is needed.
+SEXP as_integer64_integer(SEXP x_, SEXP ret_){
+  long long i, n = LENGTH(x_);
+  long long * ret = (long long *) REAL(ret_);
+  int * x = INTEGER(x_); 
+  for (i=0; i<n; i++){
+    if (x[i]==NA_INTEGER) 
+	  ret[i] = NA_INTEGER64;
+	else
+      ret[i] = (long long) x[i];
+  }
+  return ret_;
+}
+
+
+// Coerce integer64 -> double.  NA_INTEGER64 maps to NA_REAL.  Values whose
+// magnitude exceeds the largest exactly-representable double integer
+// (FLT_RADIX^DBL_MANT_DIG - 1, i.e. 2^53-1 for IEEE doubles) are still
+// converted, but a precision-loss warning is raised once.
+SEXP as_double_integer64(SEXP x_, SEXP ret_){
+  long long i, n = LENGTH(x_);
+  long long * x = (long long *) REAL(x_); 
+  double * ret = REAL(ret_);
+  double rmax = pow(FLT_RADIX, DBL_MANT_DIG) - 1;
+  double rmin = -rmax;
+  Rboolean naflag = FALSE;
+  for (i=0; i<n; i++){
+    if (x[i]==NA_INTEGER64)
+      ret[i] = NA_REAL;
+	else{
+	  if (x[i]<rmin || x[i]>rmax)
+		naflag = TRUE;
+	  ret[i] = (double) x[i];
+	}
+  }
+  if (naflag)warning(INTEGER64_TODOUBLE_WARNING);
+  return ret_;
+}
+
+// Coerce integer64 -> 32-bit integer.  NA_INTEGER64 maps to NA_INTEGER;
+// values outside [MIN_INTEGER32, MAX_INTEGER32] become NA_INTEGER with a
+// single overflow warning.
+SEXP as_integer_integer64(SEXP x_, SEXP ret_){
+  long long i, n = LENGTH(x_);
+  long long * x = (long long *) REAL(x_); 
+  int * ret = INTEGER(ret_);
+  Rboolean naflag = FALSE;
+  for (i=0; i<n; i++){
+    if (x[i]==NA_INTEGER64)
+      ret[i] = NA_INTEGER;
+	else{
+	  if (x[i]<MIN_INTEGER32 || x[i]>MAX_INTEGER32){
+	    ret[i] = NA_INTEGER;
+		naflag = TRUE;
+	  }else
+	    ret[i] = (int) x[i];
+	}
+  }
+  if (naflag)warning(INTEGER32_OVERFLOW_WARNING);
+  return ret_;
+}
+
+// Coerce integer64 -> logical: NA stays NA, zero -> FALSE, nonzero -> TRUE.
+SEXP as_logical_integer64(SEXP x_, SEXP ret_){
+  long long i, n = LENGTH(x_);
+  long long * x = (long long *) REAL(x_); 
+  int * ret = INTEGER(ret_);
+  for (i=0; i<n; i++){
+    if (x[i]==NA_INTEGER64)
+      ret[i] = NA_INTEGER;
+	else{
+	  ret[i] = x[i]==0 ? 0: 1;
+	}
+  }
+  return ret_;
+}
+
+
+// Coerce integer64 -> character using decimal formatting (COERCE_INTEGER64
+// format string from integer64.h); NA maps to NA_STRING.
+// The static scratch buffer is safe only because R evaluation is
+// single-threaded.
+SEXP as_character_integer64(SEXP x_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * x = (long long *) REAL(x_);
+  static char buff[NCHARS_DECS_INTEGER64];
+  for(i=0; i<n; i++){
+    if (x[i]==NA_INTEGER64){
+	  SET_STRING_ELT(ret_, i, NA_STRING);
+	}else{
+	  snprintf(buff, NCHARS_DECS_INTEGER64, COERCE_INTEGER64, x[i]); 
+	  SET_STRING_ELT(ret_, i, mkChar(buff)); 
+	}
+  }
+  return ret_;
+}
+
+// Coerce character -> integer64 via strtoll (base 10).  Any trailing
+// non-numeric text makes the element NA_INTEGER64 (this also turns "NA"
+// into NA).
+// NOTE(review): errno/ERANGE is not checked, so out-of-range strings are
+// silently clamped by strtoll to LLONG_MIN/LLONG_MAX instead of becoming
+// NA -- worth confirming whether that is intended.
+SEXP as_integer64_character(SEXP x_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * ret = (long long *) REAL(ret_);
+  const char * str;
+  char * endpointer;
+  for(i=0; i<n; i++){
+	str = CHAR(STRING_ELT(x_, i)); endpointer = (char *)str; // thanks to Murray Stokely 28.1.2012
+	ret[i] = strtoll(str, &endpointer, 10);
+	if (*endpointer)
+	  ret[i] = NA_INTEGER64;
+  }
+  return ret_;
+}
+
+// Render each integer64 as a 64-character bit string, most significant bit
+// first, by sweeping a single-bit mask from LEFTBIT_INTEGER64 down to 1.
+// NA is rendered as its bit pattern, not as NA_STRING.
+SEXP as_bitstring_integer64(SEXP x_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * x = (long long *) REAL(x_);
+  unsigned long long mask;
+  long long v;
+  static char buff[NCHARS_BITS_INTEGER64];
+  char * str;
+  for(i=0; i<n; i++){
+	v = x[i];
+	str = buff;
+	mask = LEFTBIT_INTEGER64;
+    while (mask){
+        if (v & mask)
+              *str = '1';
+          else 
+              *str = '0';
+        str++;
+        mask >>= 1;
+    }
+    *str = 0;
+    SET_STRING_ELT(ret_, i, mkChar(buff)); 
+  }
+  return ret_;
+}
+
+// Element-wise integer64 addition with R-style recycling of the shorter
+// operand.  PLUS64 (integer64.h) handles NA propagation and sets naflag on
+// overflow; one warning is raised for the whole vector.
+SEXP plus_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * e2 = (long long *) REAL(e2_);
+  long long * ret = (long long *) REAL(ret_);
+  Rboolean naflag = FALSE;
+	mod_iterate(n1, n2, i1, i2) {
+		PLUS64(e1[i1],e2[i2],ret[i],naflag)
+	}
+	if (naflag)warning(INTEGER64_OVERFLOW_WARNING);
+  return ret_;
+}
+
+// Element-wise integer64 subtraction with recycling; see plus_integer64.
+SEXP minus_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * e2 = (long long *) REAL(e2_);
+  long long * ret = (long long *) REAL(ret_);
+  Rboolean naflag = FALSE;
+	mod_iterate(n1, n2, i1, i2) {
+		MINUS64(e1[i1],e2[i2],ret[i],naflag)
+	}
+	if (naflag)warning(INTEGER64_OVERFLOW_WARNING);
+  return ret_;
+}
+
+// Lagged difference: ret[i] = x[i+lag] - x[i] for i in 0..n-1, with
+// NA/overflow handling via MINUS64.  n_ is the result length passed as an
+// integer64 scalar; the R caller is responsible for ensuring i+lag stays
+// within x_ (presumably n == length(x) - lag -- TODO confirm at call site).
+SEXP diff_integer64(SEXP x_, SEXP lag_, SEXP n_, SEXP ret_){
+  long long i, n = *((long long *) REAL(n_));
+  long long * x = (long long *) REAL(x_);
+  long long * lag = (long long *) REAL(lag_);
+  long long * ret = (long long *) REAL(ret_);
+  long long vlag = *lag;
+  long long v;
+  Rboolean naflag = FALSE;
+	for(i=0; i<n; i++) {
+	  v = x[i];
+	  MINUS64(x[i+vlag],v,ret[i],naflag)
+	}
+	if (naflag)warning(INTEGER64_OVERFLOW_WARNING);
+  return ret_;
+}
+
+// Element-wise integer division (%/%) with recycling; INTDIV64 handles NA
+// and sets naflag on division by zero (one warning per call).
+SEXP intdiv_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * e2 = (long long *) REAL(e2_);
+  long long * ret = (long long *) REAL(ret_);
+  Rboolean naflag = FALSE;
+	mod_iterate(n1, n2, i1, i2) {
+		INTDIV64(e1[i1],e2[i2],ret[i],naflag)
+	}
+	if (naflag)warning(INTEGER64_DIVISION_BY_ZERO_WARNING);
+  return ret_;
+}
+
+// Element-wise modulo (%%) with recycling; see intdiv_integer64.
+SEXP mod_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * e2 = (long long *) REAL(e2_);
+  long long * ret = (long long *) REAL(ret_);
+  Rboolean naflag = FALSE;
+	mod_iterate(n1, n2, i1, i2) {
+		MOD64(e1[i1],e2[i2],ret[i],naflag)
+	}
+	if (naflag)warning(INTEGER64_DIVISION_BY_ZERO_WARNING);
+  return ret_;
+}
+
+
+// Element-wise integer64 * integer64 with recycling; PROD64 handles NA and
+// flags overflow.
+SEXP times_integer64_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * e2 = (long long *) REAL(e2_);
+  long long * ret = (long long *) REAL(ret_);
+  Rboolean naflag = FALSE;
+	mod_iterate(n1, n2, i1, i2) {
+		PROD64(e1[i1],e2[i2],ret[i],naflag)
+	}
+	if (naflag)warning(INTEGER64_OVERFLOW_WARNING);
+  return ret_;
+}
+
+// Element-wise integer64 * double -> integer64; PROD64REAL computes via a
+// long double intermediate (longret) to detect overflow before truncation.
+SEXP times_integer64_double(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  double * e2 = REAL(e2_);
+  long long * ret = (long long *) REAL(ret_);
+  long double longret;
+  Rboolean naflag = FALSE;
+	mod_iterate(n1, n2, i1, i2) {
+		PROD64REAL(e1[i1],e2[i2],ret[i],naflag,longret)
+	}
+	if (naflag)warning(INTEGER64_OVERFLOW_WARNING);
+  return ret_;
+}
+
+// Element-wise integer64 ^ integer64 with recycling; POW64 uses a long
+// double intermediate (longret) and flags overflow.
+SEXP power_integer64_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * e2 = (long long *) REAL(e2_);
+  long long * ret = (long long *) REAL(ret_);
+  long double longret;
+  Rboolean naflag = FALSE;
+	mod_iterate(n1, n2, i1, i2) {
+		POW64(e1[i1],e2[i2],ret[i],naflag, longret)
+	}
+	if (naflag)warning(INTEGER64_OVERFLOW_WARNING);
+  return ret_;
+}
+
+// Element-wise integer64 ^ double -> integer64; see power_integer64_integer64.
+SEXP power_integer64_double(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  double * e2 = REAL(e2_);
+  long long * ret = (long long *) REAL(ret_);
+  long double longret;
+  Rboolean naflag = FALSE;
+	mod_iterate(n1, n2, i1, i2) {
+		POW64REAL(e1[i1],e2[i2],ret[i],naflag,longret)
+	}
+	if (naflag)warning(INTEGER64_OVERFLOW_WARNING);
+  return ret_;
+}
+
+// Element-wise true division ("/") of integer64 by integer64: the result
+// vector is double (REALSXP), matching R's "/" semantics.  DIVIDE64 handles
+// NA and flags problems via naflag.
+SEXP divide_integer64_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+   long long i, n = LENGTH(ret_);
+   long long i1, n1 = LENGTH(e1_);
+   long long i2, n2 = LENGTH(e2_);
+   long long * e1 = (long long *) REAL(e1_);
+   long long * e2 = (long long *) REAL(e2_);
+   double * ret = REAL(ret_);
+   Rboolean naflag = FALSE;
+	 mod_iterate(n1, n2, i1, i2) {
+		 DIVIDE64(e1[i1],e2[i2],ret[i],naflag)
+	 }
+	 if (naflag)warning(INTEGER64_OVERFLOW_WARNING);
+   return ret_;
+}
+// True division of integer64 by double -> double; see
+// divide_integer64_integer64.
+SEXP divide_integer64_double(SEXP e1_, SEXP e2_, SEXP ret_){
+   long long i, n = LENGTH(ret_);
+   long long i1, n1 = LENGTH(e1_);
+   long long i2, n2 = LENGTH(e2_);
+   long long * e1 = (long long *) REAL(e1_);
+   double * e2 = REAL(e2_);
+   double * ret = REAL(ret_);
+   Rboolean naflag = FALSE;
+	 mod_iterate(n1, n2, i1, i2) {
+		 DIVIDE64REAL(e1[i1],e2[i2],ret[i],naflag)
+	 }
+	 if (naflag)warning(INTEGER64_OVERFLOW_WARNING);
+   return ret_;
+}
+
+// Element-wise sign of integer64 (-1/0/1; NA handling inside the SIGN64
+// macro from integer64.h).
+SEXP sign_integer64(SEXP e1_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * ret = (long long *) REAL(ret_);
+	for(i=0; i<n; i++) {
+		SIGN64(e1[i],ret[i])
+	}
+  return ret_;
+}
+
+// Element-wise absolute value of integer64 via the ABS64 macro.
+SEXP abs_integer64(SEXP e1_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * ret = (long long *) REAL(ret_);
+	for(i=0; i<n; i++) {
+		ABS64(e1[i],ret[i])
+	}
+  return ret_;
+}
+
+// Element-wise square root of integer64 -> double; SQRT64 sets naflag when
+// a NaN is produced (negative input), yielding one warning per call.
+SEXP sqrt_integer64(SEXP e1_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * e1 = (long long *) REAL(e1_);
+  double * ret = REAL(ret_);
+  Rboolean naflag = FALSE;
+  for(i=0; i<n; i++) {
+	SQRT64(e1[i],ret[i],naflag)
+  }
+  if (naflag)warning(INTEGER64_NAN_CREATED_WARNING);
+  return ret_;
+}
+
+// Element-wise natural log of integer64 -> double; LOG64 flags NaN creation
+// (non-positive input) via naflag.
+SEXP log_integer64(SEXP e1_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * e1 = (long long *) REAL(e1_);
+  double * ret = REAL(ret_);
+  Rboolean naflag = FALSE;
+	for(i=0; i<n; i++) {
+		LOG64(e1[i],ret[i],naflag)
+	}
+	if (naflag)warning(INTEGER64_NAN_CREATED_WARNING);
+  return ret_;
+}
+
+// Element-wise log of integer64 values with a vector of double bases,
+// recycling the shorter operand; result is double.  LOGVECT64 flags NaN
+// creation (non-positive value or invalid base) via naflag, raising one
+// warning per call.
+SEXP logvect_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  double * e2 = REAL(e2_);
+  double * ret = REAL(ret_);
+  Rboolean naflag = FALSE;
+	mod_iterate(n1, n2, i1, i2) {
+		// BUGFIX: index with the recycling counters i1/i2 (as every other
+		// binary op here does), not with i -- using i read past the end of
+		// the shorter operand whenever n1 != n2
+		LOGVECT64(e1[i1],e2[i2],ret[i],naflag)
+	}
+	if (naflag)warning(INTEGER64_NAN_CREATED_WARNING);
+  return ret_;
+}
+
+// Element-wise log of integer64 to a scalar base: computed as log(x)/log(base)
+// via LOGBASE64.  A non-positive base pre-sets naflag so the NaN warning is
+// always raised in that case.
+SEXP logbase_integer64(SEXP e1_, SEXP base_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * e1 = (long long *) REAL(e1_);
+  long double logbase = (long double) log(asReal(base_));
+  double * ret = REAL(ret_);
+  Rboolean naflag = (asReal(base_)>0) ? FALSE : TRUE;
+	for(i=0; i<n; i++) {
+		LOGBASE64(e1[i],logbase,ret[i],naflag)
+	}
+	if (naflag)warning(INTEGER64_NAN_CREATED_WARNING);
+  return ret_;
+}
+
+// Element-wise base-10 log of integer64 -> double.  Uses the platform's
+// log10 when available (HAVE_LOG10), otherwise falls back to
+// log(x)/log(10) via LOGBASE64.
+SEXP log10_integer64(SEXP e1_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * e1 = (long long *) REAL(e1_);
+  double * ret = REAL(ret_);
+  Rboolean naflag = FALSE;
+#ifdef HAVE_LOG10
+	for(i=0; i<n; i++) {
+		LOG1064(e1[i],ret[i],naflag)
+	}
+#else
+  long double logbase = (long double) log(10);
+  for(i=0; i<n; i++) {
+	LOGBASE64(e1[i],logbase,ret[i],naflag)
+  }
+#endif	
+  if (naflag)warning(INTEGER64_NAN_CREATED_WARNING);
+  return ret_;
+}
+
+// Element-wise base-2 log of integer64 -> double; same fallback scheme as
+// log10_integer64 (HAVE_LOG2 / LOGBASE64).
+SEXP log2_integer64(SEXP e1_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * e1 = (long long *) REAL(e1_);
+  double * ret = REAL(ret_);
+  Rboolean naflag = FALSE;
+#ifdef HAVE_LOG2
+	for(i=0; i<n; i++) {
+		LOG264(e1[i],ret[i],naflag)
+	}
+#else
+  long double logbase = (long double) log(2);
+  for(i=0; i<n; i++) {
+	LOGBASE64(e1[i],logbase,ret[i],naflag)
+  }
+#endif	
+  if (naflag)warning(INTEGER64_NAN_CREATED_WARNING);
+  return ret_;
+}
+
+// any() for integer64 with R's three-valued logic.  Short-circuits to TRUE
+// on the first nonzero (non-NA) element; otherwise FALSE, or NA when NAs
+// were seen and na.rm is FALSE.
+SEXP any_integer64(SEXP e1_, SEXP na_rm_, SEXP ret_){
+  long long i, n = LENGTH(e1_);
+  long long * e1 = (long long *) REAL(e1_);
+  Rboolean * ret = (Rboolean *) LOGICAL(ret_);
+  Rboolean hasna=FALSE;
+	if (asLogical(na_rm_)){
+		for(i=0; i<n; i++){
+			if (e1[i]!=NA_INTEGER64 && e1[i]){
+				ret[0] = TRUE;
+				return ret_;
+			}
+		}
+		ret[0] = FALSE;
+	}else{
+		for(i=0; i<n; i++){
+			if (e1[i]==NA_INTEGER64){
+				hasna = TRUE;
+			}else if (e1[i]){
+				ret[0] = TRUE;
+				return ret_;
+			}
+		}
+		// no TRUE found: NA dominates FALSE per R semantics
+		ret[0] = hasna ? NA_LOGICAL : FALSE;
+	}
+  return ret_;
+}
+
+// all() for integer64 with R's three-valued logic; mirror image of
+// any_integer64 (short-circuits to FALSE on the first zero element).
+SEXP all_integer64(SEXP e1_, SEXP na_rm_, SEXP ret_){
+  long long i, n = LENGTH(e1_);
+  long long * e1 = (long long *) REAL(e1_);
+  Rboolean * ret = (Rboolean *) LOGICAL(ret_);
+  Rboolean hasna=FALSE;
+	if (asLogical(na_rm_)){
+		for(i=0; i<n; i++){
+			if (e1[i]!=NA_INTEGER64 && !e1[i]){
+				ret[0] = FALSE;
+				return ret_;
+			}
+		}
+		ret[0] = TRUE;
+	}else{
+		for(i=0; i<n; i++){
+			if (e1[i]==NA_INTEGER64){
+				hasna = TRUE;
+			}else if (!e1[i]){
+				ret[0] = FALSE;
+				return ret_;
+			}
+		}
+		ret[0] = hasna ? NA_LOGICAL : TRUE;
+	}
+  return ret_;
+}
+
+
+// sum() for integer64: scalar int64 result in ret_[0].
+// Overflow is detected via GOODISUM64 and turns the result into NA with a warning.
+// NOTE(review): `cumsum + e1[i]` is computed before the check, so detection relies
+// on two's-complement wraparound of a signed add, which is UB per the C standard
+// (works on the compilers R targets, but worth confirming).
+// With na.rm=FALSE any NA element short-circuits to an NA result; sum of an
+// empty vector is 0.
+SEXP sum_integer64(SEXP e1_, SEXP na_rm_, SEXP ret_){
+  long long i, n = LENGTH(e1_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * ret = (long long *) REAL(ret_);
+  long long cumsum, tempsum;
+  cumsum = 0;
+	if (asLogical(na_rm_)){
+		for(i=0; i<n; i++){
+			if (e1[i]!=NA_INTEGER64){
+				tempsum = cumsum + e1[i];
+				if (!GOODISUM64(cumsum, e1[i], tempsum)){
+					warning(INTEGER64_OVERFLOW_WARNING);
+					ret[0] = NA_INTEGER64;
+					return ret_;
+				}
+				cumsum = tempsum;
+			}
+		}
+	}else{
+		for(i=0; i<n; i++){
+			if (e1[i]==NA_INTEGER64){
+				ret[0] = NA_INTEGER64;
+				return ret_;
+			}else{
+				tempsum = cumsum + e1[i];
+				if (!GOODISUM64(cumsum, e1[i], tempsum)){
+					warning(INTEGER64_OVERFLOW_WARNING);
+					ret[0] = NA_INTEGER64;
+					return ret_;
+				}
+				cumsum = tempsum;
+			}
+		}
+	}
+  ret[0] = cumsum;
+  return ret_;
+}
+
+// mean() for integer64: scalar int64 result (truncated toward zero) in ret_[0].
+// Accumulates in long double to avoid intermediate int64 overflow.
+// Fix: guard the divisions — previously an all-NA vector with na.rm=TRUE
+// divided by nvalid==0, and an empty vector divided by n==0; a long double
+// division by zero followed by conversion to long long is undefined behavior.
+// Both cases now return NA_INTEGER64.
+SEXP mean_integer64(SEXP e1_, SEXP na_rm_, SEXP ret_){
+	long long i, n = LENGTH(e1_);
+	long long * e1 = (long long *) REAL(e1_);
+	long long * ret = (long long *) REAL(ret_);
+	long double longret = 0;
+	if (asLogical(na_rm_)){
+		long long nvalid = 0;
+		for(i=0; i<n; i++){
+			if (e1[i]!=NA_INTEGER64){
+				longret += e1[i];
+				nvalid++;
+			}
+		}
+		if (nvalid==0)
+			ret[0] = NA_INTEGER64;  // no non-NA values: mean undefined
+		else
+			ret[0] = longret / nvalid;
+	}else{
+		for(i=0; i<n; i++){
+			if (e1[i]==NA_INTEGER64){
+				ret[0] = NA_INTEGER64;
+				return ret_;
+			}else{
+				longret += e1[i];			
+			}
+		}
+		if (n==0)
+			ret[0] = NA_INTEGER64;  // empty vector: mean undefined
+		else
+			ret[0] = longret / n;
+	}
+  return ret_;
+}
+
+// prod() for integer64: scalar int64 result in ret_[0].
+// Overflow is detected by recomputing the product in long double (GOODIPROD64)
+// and turns the result into NA with a warning. With na.rm=FALSE any NA element
+// short-circuits to an NA result; product of an empty vector is 1.
+// NOTE(review): like sum, the wrapping signed multiply happens before the
+// check, which is technically UB — confirm acceptable on target compilers.
+SEXP prod_integer64(SEXP e1_, SEXP na_rm_, SEXP ret_){
+  long long i, n = LENGTH(e1_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * ret = (long long *) REAL(ret_);
+  long long cumprod, tempprod;
+  cumprod = 1;
+	if (asLogical(na_rm_)){
+		for(i=0; i<n; i++){
+			if (e1[i]!=NA_INTEGER64){
+				tempprod = cumprod * e1[i];
+				if (!GOODIPROD64(cumprod, e1[i], tempprod)){
+					warning(INTEGER64_OVERFLOW_WARNING);
+					ret[0] = NA_INTEGER64;
+					return ret_;
+				}
+				cumprod = tempprod;
+			}
+		}
+	}else{
+		for(i=0; i<n; i++){
+			if (e1[i]==NA_INTEGER64){
+				ret[0] = NA_INTEGER64;
+				return ret_;
+			}else{
+				tempprod = cumprod * e1[i];
+				if (!GOODIPROD64(cumprod, e1[i], tempprod)){
+					warning(INTEGER64_OVERFLOW_WARNING);
+					ret[0] = NA_INTEGER64;
+					return ret_;
+				}
+				cumprod = tempprod;
+			}
+		}
+	}
+  ret[0] = cumprod;
+  return ret_;
+}
+
+
+// min() for integer64: scalar int64 result in ret_[0].
+// With na.rm=FALSE any NA short-circuits to NA. For an empty vector (or all
+// NA with na.rm=TRUE) the MAX_INTEGER64 seed is returned unchanged — the
+// "+Inf"-like identity element; presumably the R wrapper warns in that case.
+SEXP min_integer64(SEXP e1_, SEXP na_rm_, SEXP ret_){
+  long long i, n = LENGTH(e1_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * ret = (long long *) REAL(ret_);
+  ret[0] = MAX_INTEGER64;
+	if (asLogical(na_rm_)){
+		for(i=0; i<n; i++){
+			if (e1[i]!=NA_INTEGER64 && e1[i]<ret[0]){
+				ret[0] = e1[i];
+			}
+		}
+	}else{
+		for(i=0; i<n; i++){
+			if (e1[i]==NA_INTEGER64){
+				ret[0] = NA_INTEGER64;
+				return ret_;
+			}else{
+				if (e1[i]<ret[0])
+					ret[0] = e1[i];
+			}
+		}
+	}
+  return ret_;
+}
+
+// max() for integer64: scalar int64 result in ret_[0].
+// Mirror image of min_integer64: seeded with MIN_INTEGER64 (the "-Inf"-like
+// identity), NA short-circuits when na.rm=FALSE.
+SEXP max_integer64(SEXP e1_, SEXP na_rm_, SEXP ret_){
+  long long i, n = LENGTH(e1_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * ret = (long long *) REAL(ret_);
+  ret[0] = MIN_INTEGER64;
+	if (asLogical(na_rm_)){
+		for(i=0; i<n; i++){
+			if (e1[i]!=NA_INTEGER64 && e1[i]>ret[0]){
+				ret[0] = e1[i];
+			}
+		}
+	}else{
+		for(i=0; i<n; i++){
+			if (e1[i]==NA_INTEGER64){
+				ret[0] = NA_INTEGER64;
+				return ret_;
+			}else{
+				if (e1[i]>ret[0])
+					ret[0] = e1[i];
+			}
+		}
+	}
+  return ret_;
+}
+
+// range() for integer64: writes min to ret_[0] and max to ret_[1] in one pass.
+// Seeds are the identity elements (MAX for min, MIN for max); with na.rm=FALSE
+// any NA short-circuits both results to NA.
+SEXP range_integer64(SEXP e1_, SEXP na_rm_, SEXP ret_){
+  long long i, n = LENGTH(e1_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * ret = (long long *) REAL(ret_);
+  ret[0] = MAX_INTEGER64;
+  ret[1] = MIN_INTEGER64;
+	if (asLogical(na_rm_)){
+		for(i=0; i<n; i++){
+			if (e1[i]!=NA_INTEGER64){
+				if (e1[i]<ret[0])
+					ret[0] = e1[i];
+				if (e1[i]>ret[1])
+					ret[1] = e1[i];
+			}
+		}
+	}else{
+		for(i=0; i<n; i++){
+			if (e1[i]==NA_INTEGER64){
+				ret[0] = ret[1] = NA_INTEGER64;
+				return ret_;
+			}else{
+				if (e1[i]<ret[0])
+					ret[0] = e1[i];
+				if (e1[i]>ret[1])
+					ret[1] = e1[i];
+			}
+		}
+	}
+  return ret_;
+}
+
+// Writes the representable integer64 limits into ret_: the smallest value at
+// position 0 and the largest at position 1 (the NA sentinel LLONG_MIN is excluded).
+SEXP lim_integer64(SEXP ret_){
+  long long * bounds = (long long *) REAL(ret_);
+  bounds[1] = MAX_INTEGER64;
+  bounds[0] = MIN_INTEGER64;
+  return ret_;
+}
+
+
+// cummin() for integer64: ret[i] = min(e1[0..i]).
+// Once an NA is encountered every later position is NA (R semantics):
+// the main loop breaks at the first NA, then the trailing loop fills the
+// remainder with NA. If e1[0] is already NA the guarded main loop is skipped
+// entirely and the fill loop (starting at i=1) handles positions 1..n-1.
+SEXP cummin_integer64(SEXP e1_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * ret = (long long *) REAL(ret_);
+  if (n>0){
+	i=0;
+	ret[i] = e1[i];
+	if(e1[i]!=NA_INTEGER64)
+	for(i=1; i<n; i++){
+		if(e1[i]==NA_INTEGER64){
+			ret[i] = e1[i];
+			break;
+		}else{
+			ret[i] = e1[i]<ret[i-1] ? e1[i] : ret[i-1];		
+		}
+	}
+	// NA tail fill (no-op when the loop above ran to completion)
+	for(i++; i<n; i++){
+		ret[i] = NA_INTEGER64;
+	}
+  }
+  return ret_;
+}
+
+// cummax() for integer64: ret[i] = max(e1[0..i]).
+// Same NA-propagation structure as cummin_integer64: break at the first NA,
+// then fill the rest of the output with NA.
+SEXP cummax_integer64(SEXP e1_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * ret = (long long *) REAL(ret_);
+  if (n>0){
+	i=0;
+	ret[i] = e1[i];
+	if(e1[i]!=NA_INTEGER64)
+	for(i=1; i<n; i++){
+		if(e1[i]==NA_INTEGER64){
+			ret[i] = e1[i];
+			break;
+		}else{
+			ret[i] = e1[i]>ret[i-1] ? e1[i] : ret[i-1];		
+		}
+	}
+	// NA tail fill (no-op when the loop above ran to completion)
+	for(i++; i<n; i++){
+		ret[i] = NA_INTEGER64;
+	}
+  }
+  return ret_;
+}
+
+// cumsum() for integer64: ret[i] = e1[0] + ... + e1[i].
+// PLUS64 propagates NA: once ret[i-1] is NA (input NA or overflow) all later
+// results are NA as well; overflow additionally raises a warning once.
+SEXP cumsum_integer64(SEXP e1_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * ret = (long long *) REAL(ret_);
+  Rboolean naflag = FALSE;
+    if (n>0)
+	  ret[0] = e1[0];
+	for(i=1; i<n; i++) {
+		PLUS64(e1[i],ret[i-1],ret[i],naflag)
+	}
+	if (naflag)warning(INTEGER64_OVERFLOW_WARNING);
+  return ret_;
+}
+
+// cumprod() for integer64: ret[i] = e1[0] * ... * e1[i].
+// PROD64 propagates NA exactly like PLUS64 does in cumsum_integer64;
+// overflow turns the running product (and everything after) into NA.
+SEXP cumprod_integer64(SEXP e1_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * ret = (long long *) REAL(ret_);
+  Rboolean naflag = FALSE;
+    if (n>0)
+	  ret[0] = e1[0];
+	for(i=1; i<n; i++) {
+		PROD64(e1[i],ret[i-1],ret[i],naflag)
+	}
+	if (naflag)warning(INTEGER64_OVERFLOW_WARNING);
+  return ret_;
+}
+
+// Arithmetic integer64 sequence: ret[k] = from + k*by, built incrementally.
+// The output length (LENGTH(ret_)) determines how many terms are produced.
+SEXP seq_integer64(SEXP from_, SEXP by_, SEXP ret_){
+  long long k, len = LENGTH(ret_);
+  long long * start = (long long *) REAL(from_);
+  long long step = ((long long *) REAL(by_))[0];
+  long long * out = (long long *) REAL(ret_);
+  if (len>0){
+    long long cur = start[0];
+    out[0] = cur;
+    for(k=1; k<len; k++){
+      cur += step;
+      out[k] = cur;
+    }
+  }
+  return ret_;
+}
+
+// is.na() for integer64: TRUE wherever the int64 payload equals the NA
+// sentinel (LLONG_MIN), FALSE otherwise. Never returns NA itself.
+SEXP isna_integer64(SEXP e1_, SEXP ret_){
+  long long k, len = LENGTH(ret_);
+  long long * val = (long long *) REAL(e1_);
+  Rboolean * out = (Rboolean *) LOGICAL(ret_);
+  for(k=0; k<len; k++)
+    out[k] = (val[k]==NA_INTEGER64) ? TRUE : FALSE;
+  return ret_;
+}
+
+
+
+// Elementwise == on two integer64 vectors; NA in either operand yields NA
+// (EQ64). mod_iterate presumably recycles the shorter operand — macro defined
+// elsewhere, verify against the header.
+SEXP EQ_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * e2 = (long long *) REAL(e2_);
+  Rboolean * ret = (Rboolean *) LOGICAL(ret_);
+	mod_iterate(n1, n2, i1, i2) {
+		EQ64(e1[i1],e2[i2],ret[i])
+	}
+  return ret_;
+}
+
+// Elementwise != on two integer64 vectors; NA-propagating, recycling as in EQ_integer64.
+SEXP NE_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * e2 = (long long *) REAL(e2_);
+  Rboolean * ret = (Rboolean *) LOGICAL(ret_);
+	mod_iterate(n1, n2, i1, i2) {
+		NE64(e1[i1],e2[i2],ret[i])
+	}
+  return ret_;
+}
+
+// Elementwise < on two integer64 vectors; NA-propagating, recycling as in EQ_integer64.
+SEXP LT_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * e2 = (long long *) REAL(e2_);
+  Rboolean * ret = (Rboolean *) LOGICAL(ret_);
+	mod_iterate(n1, n2, i1, i2) {
+		LT64(e1[i1],e2[i2],ret[i])
+	}
+  return ret_;
+}
+
+// Elementwise <= on two integer64 vectors; NA-propagating, recycling as in EQ_integer64.
+SEXP LE_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * e2 = (long long *) REAL(e2_);
+  Rboolean * ret = (Rboolean *) LOGICAL(ret_);
+	mod_iterate(n1, n2, i1, i2) {
+		LE64(e1[i1],e2[i2],ret[i])
+	}
+  return ret_;
+}
+
+// Elementwise > on two integer64 vectors; NA-propagating, recycling as in EQ_integer64.
+SEXP GT_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * e2 = (long long *) REAL(e2_);
+  Rboolean * ret = (Rboolean *) LOGICAL(ret_);
+	mod_iterate(n1, n2, i1, i2) {
+		GT64(e1[i1],e2[i2],ret[i])
+	}
+  return ret_;
+}
+
+// Elementwise >= on two integer64 vectors; NA-propagating, recycling as in EQ_integer64.
+SEXP GE_integer64(SEXP e1_, SEXP e2_, SEXP ret_){
+  long long i, n = LENGTH(ret_);
+  long long i1, n1 = LENGTH(e1_);
+  long long i2, n2 = LENGTH(e2_);
+  long long * e1 = (long long *) REAL(e1_);
+  long long * e2 = (long long *) REAL(e2_);
+  Rboolean * ret = (Rboolean *) LOGICAL(ret_);
+	mod_iterate(n1, n2, i1, i2) {
+		GE64(e1[i1],e2[i2],ret[i])
+	}
+  return ret_;
+}
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                           LOCAL FUNCTIONS                               **/
+/**                                                                         **/
+/*****************************************************************************/
+
+// static
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                                EOF                                      **/
+/**                                                                         **/
+/*****************************************************************************/
+
+
+
+	
+
diff --git a/src/integer64.h b/src/integer64.h
new file mode 100644
index 0000000..095fa2c
--- /dev/null
+++ b/src/integer64.h
@@ -0,0 +1,334 @@
+/*
+# Header-Code
+# S3 atomic 64bit integers for R
+# (c) 2011 Jens Oehlschägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+#*/
+
+	
+// Include guard. Fix: the old name `_INTEGER64_INLCUDED` misspelled "INCLUDED"
+// and used a reserved identifier (leading underscore followed by an uppercase
+// letter is reserved for the implementation, C11 7.1.3).
+#ifndef INTEGER64_INCLUDED
+#define INTEGER64_INCLUDED
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                            MODULES USED                                 **/
+/**                                                                         **/
+/*****************************************************************************/
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                      DEFINITIONS AND MACROS                             **/
+/**                                                                         **/
+/*****************************************************************************/
+
+// integer64 representation: payload is a two's-complement long long stored in
+// an R REALSXP; LLONG_MIN is reserved as the NA sentinel, so the usable range
+// is [LLONG_MIN+1, LLONG_MAX].
+#define NA_INTEGER64 LLONG_MIN
+#define ISNA_INTEGER64(X)((X)==NA_INTEGER64)
+
+// Fix: parenthesize the MIN_* expansions — unparenthesized `LLONG_MIN+1`
+// mis-expands inside larger expressions (e.g. `-MIN_INTEGER64` would become
+// `-LLONG_MIN+1`, overflowing instead of yielding LLONG_MAX).
+#define MIN_INTEGER64 (LLONG_MIN+1)
+#define MAX_INTEGER64 LLONG_MAX
+#define MIN_INTEGER32 (INT_MIN+1)
+#define MAX_INTEGER32 INT_MAX
+#define LEFTBIT_INTEGER64 ((unsigned long long int)0x8000000000000000)
+#define RIGHTBIT_INTEGER64 ((unsigned long long int)0x0000000000000001)
+#define NCHARS_BITS_INTEGER64 65
+#define NCHARS_DECS_INTEGER64 22
+#define COERCE_INTEGER64 "%lli"
+#define USES_TWOS_COMPLEMENT 1
+#define BITS_INTEGER64 64
+
+// Overflow detectors: z is the already-computed (wrapped) result of x op y.
+// NOTE(review): the two's-complement variants rely on signed wraparound having
+// occurred, which is UB per the C standard — works with the compilers R
+// supports, but worth confirming (or compiling with -fwrapv).
+#if USES_TWOS_COMPLEMENT
+# define OPPOSITE_SIGNS(x, y) ((x < 0) ^ (y < 0))
+# define GOODISUM64(x, y, z) (((x) > 0) ? ((y) < (z)) : ! ((y) < (z)))
+# define GOODIDIFF64(x, y, z) (!(OPPOSITE_SIGNS(x, y) && OPPOSITE_SIGNS(x, z)))
+#else
+# define GOODISUM64(x, y, z) ((long double) (x) + (long double) (y) == (z))
+# define GOODIDIFF64(x, y, z) ((long double) (x) - (long double) (y) == (z))
+#endif
+#define GOODIPROD64(x, y, z) ((long double) (x) * (long double) (y) == (z))
+#define INTEGER32_OVERFLOW_WARNING "NAs produced by integer overflow"
+#define INTEGER64_OVERFLOW_WARNING "NAs produced by integer64 overflow"
+#define INTEGER64_DIVISION_BY_ZERO_WARNING "NAs produced due to division by zero"
+#define INTEGER64_NAN_CREATED_WARNING "NaNs produced"
+#define INTEGER64_TODOUBLE_WARNING "integer precision lost while converting to double"
+
+// PLUS64: ret = e1 + e2 with NA propagation; an overflowing result (detected
+// by GOODISUM64 after the wrapping add) becomes NA and sets naflag.
+#define PLUS64(e1,e2,ret,naflag) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_INTEGER64; \
+	else { \
+		ret = e1 + e2; \
+		if (!GOODISUM64(e1, e2, ret)) \
+		  ret = NA_INTEGER64; \
+		if (ret == NA_INTEGER64) \
+			naflag = TRUE; \
+	}
+	
+// MINUS64: ret = e1 - e2, same NA/overflow handling via GOODIDIFF64.
+#define MINUS64(e1,e2,ret,naflag) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_INTEGER64; \
+	else { \
+		ret = e1 - e2; \
+		if (!GOODIDIFF64(e1, e2, ret)) \
+		  ret = NA_INTEGER64; \
+		if (ret == NA_INTEGER64) \
+			naflag = TRUE; \
+	}
+
+// PROD64: ret = e1 * e2, overflow detected by long double recomputation.
+#define PROD64(e1,e2,ret,naflag) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_INTEGER64; \
+	else { \
+		ret = e1 * e2; \
+		if (!GOODIPROD64(e1, e2, ret)) \
+		  ret = NA_INTEGER64; \
+		if (ret == NA_INTEGER64) \
+			naflag = TRUE; \
+	}
+
+#define PROD64REAL(e1,e2,ret,naflag,longret) \
+	if (e1 == NA_INTEGER64 || ISNAN(e2)) \
+		ret = NA_INTEGER64; \
+	else { \
+		longret = e1 * (long double) e2; \
+		if (isnan(longret) || longret>MAX_INTEGER64){ \
+		  naflag = TRUE; \
+		  ret = NA_INTEGER64; \
+		}else \
+		  ret = llroundl(longret); \
+	}
+
+#define POW64(e1,e2,ret,naflag, longret) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_INTEGER64; \
+	else { \
+		longret = pow(e1, (long double) e2); \
+		if (isnan(longret)){ \
+		  naflag = TRUE; \
+		  ret = NA_INTEGER64; \
+		}else \
+		  ret = llroundl(longret); \
+	}
+
+#define POW64REAL(e1,e2,ret,naflag,longret) \
+	if (e1 == NA_INTEGER64 || ISNAN(e2)) \
+		ret = NA_INTEGER64; \
+	else { \
+		longret = pow(e1, (long double) e2); \
+		if (isnan(longret)){ \
+		  naflag = TRUE; \
+		  ret = NA_INTEGER64; \
+		}else \
+		  ret = llroundl(longret); \
+	}
+
+#define DIVIDE64REAL(e1,e2,ret,naflag) \
+	if (e1 == NA_INTEGER64 || ISNAN(e2)) \
+		ret = NA_REAL; \
+	else { \
+	    if (e2==0) \
+			ret = NA_REAL; \
+		else \
+			ret = (double)((long double) e1 / (long double) e2); \
+		if (ISNAN(ret)) \
+			naflag = TRUE; \
+	}
+
+#define DIVIDE64(e1,e2,ret,naflag) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_REAL; \
+	else { \
+	    if (e2==0) \
+			ret = NA_REAL; \
+		else \
+			ret = (double)((long double) e1 / (long double) e2); \
+		if (ISNAN(ret)) \
+			naflag = TRUE; \
+	}
+
+// INTDIV64: integer division with NA propagation; division by zero yields NA.
+// NOTE(review): C `/` truncates toward zero while R's `%/%` floors, so results
+// differ for mixed-sign operands — confirm this is the intended semantics.
+#define INTDIV64(e1,e2,ret,naflag) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_INTEGER64; \
+	else { \
+	    if (e2==0) \
+			ret = NA_INTEGER64; \
+		else \
+			ret = e1 / e2; \
+		if (ret == NA_INTEGER64) \
+			naflag = TRUE; \
+	}
+
+// MOD64: remainder via ret = e1 - e2*(e1/e2), after the same NA/zero checks.
+// NOTE(review): this is the C remainder (sign of the dividend), whereas R's
+// `%%` takes the sign of the divisor — confirm intended for negative operands.
+#define MOD64(e1,e2,ret,naflag) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_INTEGER64; \
+	else { \
+	    if (e2==0) \
+			ret = NA_INTEGER64; \
+		else \
+			ret = e1 / e2; \
+		if (ret == NA_INTEGER64) \
+			naflag = TRUE; \
+		else \
+			ret = e1 - e2 * ret; \
+	}
+
+#define MIN64(e1,e2,ret) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_INTEGER64; \
+	else { \
+		ret = (e1 < e2) ? e1 : e2; \
+	}
+
+#define MAX64(e1,e2,ret) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_INTEGER64; \
+	else { \
+		ret = (e1 < e2) ? e2 : e1; \
+	}
+	
+#define ABS64(e1,ret) \
+	if (e1 == NA_INTEGER64) \
+		ret = NA_INTEGER64; \
+	else { \
+		ret = (e1 < 0) ? -e1 : e1; \
+	}
+
+#define SQRT64(e1, ret, naflag) \
+	if (e1 == NA_INTEGER64) \
+		ret = NA_REAL; \
+	else { \
+		if (e1 < 0) \
+			naflag = TRUE; \
+		ret = (double) sqrt((long double)e1); \
+	}
+
+#define LOG64(e1, ret, naflag) \
+	if (e1 == NA_INTEGER64) \
+		ret = NA_REAL; \
+	else { \
+		ret = (double) logl((long double)e1); \
+		if (isnan(ret)) \
+			naflag = TRUE; \
+	}
+
+#define LOGVECT64(e1, e2, ret, naflag) \
+	if (e1 == NA_INTEGER64) \
+		ret = NA_REAL; \
+	else { \
+		ret = (double) logl((long double)e1)/log(e2); \
+		if (isnan(ret)) \
+			naflag = TRUE; \
+	}
+
+#define LOGBASE64(e1, e2, ret, naflag) \
+	if (e1 == NA_INTEGER64) \
+		ret = NA_REAL; \
+	else { \
+		ret = (double) logl((long double)e1)/e2; \
+		if (isnan(ret)) \
+			naflag = TRUE; \
+	}
+
+#define LOG1064(e1, ret, naflag) \
+	if (e1 == NA_INTEGER64) \
+		ret = NA_REAL; \
+	else { \
+		ret =(double)  log10l((long double)e1); \
+		if (isnan(ret)) \
+			naflag = TRUE; \
+	}
+
+#define LOG264(e1, ret, naflag) \
+if (e1 == NA_INTEGER64) \
+	ret = NA_REAL; \
+else { \
+	ret = (double) log2l((long double)e1); \
+		if (isnan(ret)) \
+			naflag = TRUE; \
+}
+
+
+#define SIGN64(e1,ret) \
+	if (e1 == NA_INTEGER64) \
+		ret = NA_INTEGER64; \
+	else { \
+		ret = (e1 < 0) ? -1 : ((e1 > 0) ? 1 : 0); \
+	}
+
+#define EQ64(e1,e2,ret) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_LOGICAL; \
+	else { \
+		ret = (e1 == e2) ? TRUE : FALSE; \
+	}
+
+#define NE64(e1,e2,ret) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_LOGICAL; \
+	else { \
+		ret = (e1 != e2) ? TRUE : FALSE; \
+	}
+
+#define LT64(e1,e2,ret) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_LOGICAL; \
+	else { \
+		ret = (e1 < e2) ? TRUE : FALSE; \
+	}
+
+#define LE64(e1,e2,ret) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_LOGICAL; \
+	else { \
+		ret = (e1 <= e2) ? TRUE : FALSE; \
+	}
+
+#define GT64(e1,e2,ret) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_LOGICAL; \
+	else { \
+		ret = (e1 > e2) ? TRUE : FALSE; \
+	}
+
+#define GE64(e1,e2,ret) \
+	if (e1 == NA_INTEGER64 || e2 == NA_INTEGER64) \
+		ret = NA_LOGICAL; \
+	else { \
+		ret = (e1 >= e2) ? TRUE : FALSE; \
+	}
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                      TYPEDEFS AND STRUCTURES                            **/
+/**                                                                         **/
+/*****************************************************************************/
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                        EXPORTED VARIABLES                               **/
+/**                                                                         **/
+/*****************************************************************************/
+
+
+#ifndef _INTEGER64_C_SRC
+
+#endif
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                        EXPORTED FUNCTIONS                               **/
+/**                                                                         **/
+/*****************************************************************************/
+
+
+#endif
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                                EOF                                      **/
+/**                                                                         **/
+/*****************************************************************************/
+
diff --git a/src/sort64.c b/src/sort64.c
new file mode 100644
index 0000000..bdf6023
--- /dev/null
+++ b/src/sort64.c
@@ -0,0 +1,2176 @@
+/*
+# C-Code for sorting and ordering
+# S3 atomic 64bit integers for R
+# (c) 2011 Jens Oehlschägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+*/
+
+#define _SORT64_C_SRC
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                            MODULES USED                                 **/
+/**                                                                         **/
+/*****************************************************************************/
+
+#include "sort64.h"
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                      DEFINITIONS AND MACROS                             **/
+/**                                                                         **/
+/*****************************************************************************/
+
+#define SHELLARRAYSIZE 16
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                      TYPEDEFS AND STRUCTURES                            **/
+/**                                                                         **/
+/*****************************************************************************/
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                   PROTOTYPYPES OF LOCAL FUNCTIONS                       **/
+/**                                                                         **/
+/*****************************************************************************/
+
+// static
+// returns uniform random index in range 0..(n-1)
+static IndexT randIndex(
+  IndexT n    // number of positions to random select from
+);
+
+// returns one of {a,b,c} such that it represents the median of data[{a,b,c}]
+static IndexT ram_integer64_median3(
+ValueT *data  // pointer to data
+, IndexT a    // pos in data
+, IndexT b    // pos in data
+, IndexT c    // pos in data
+);
+
+// returns one of {a,b,c} such that it represents the median of data[index[{a,b,c}]]
+static IndexT ram_integer64_median3index(
+  ValueT *data    // pointer to data
+, IndexT *index   // index positions into data
+, IndexT a        // pos in index
+, IndexT b        // pos in index
+, IndexT c        // pos in index
+);
+
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                        EXPORTED VARIABLES                               **/
+/**                                                                         **/
+/*****************************************************************************/
+
+// no static no extern
+
+IndexT compare_counter;
+IndexT move_counter;
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                          GLOBAL VARIABLES                               **/
+/**                                                                         **/
+/*****************************************************************************/
+
+// static
+
+// Descending gap sequence for shellsort, terminated by 1 so the final pass is
+// a plain insertion sort. NOTE(review): declared ValueT (64-bit data type)
+// although the values are gaps/indices — presumably harmless, verify typing.
+static const ValueT shellincs[SHELLARRAYSIZE] = {1073790977, 268460033, 67121153, 16783361, 4197377,
+           1050113, 262913, 65921, 16577, 4193, 1073, 281, 77,
+           23, 8, 1};
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                        EXPORTED FUNCTIONS                               **/
+/**                                                                         **/
+/*****************************************************************************/
+
+// no extern
+
+
+/* { === NA handling for integer64 ================================================ */
+       
+// Post-sorting NA repair: NA_INTEGER64 is LLONG_MIN, so after an ascending
+// sort the NAs sit at the left end (after a descending sort at the right end).
+// If the caller wants them at the other end (na_last), the non-NA block is
+// shifted and the NA block rewritten. Returns the number of NAs found.
+// NOTE(review): the parameter comments mention "pure doubles"/NaN — presumably
+// copied from a double variant; here only the NA_INTEGER64 sentinel applies.
+int ram_integer64_fixsortNA(
+  ValueT *data       // RETURNED: pointer to data vector
+, IndexT n           // length of data vector
+, int has_na         // 0 for pure doubles, 1 if NA or NaN can be present
+, int na_last        // 0 for placing NA NaN left, 1 for placing NA NaN right
+, int decreasing     // 0 for ascending, 1 for descending (must match the same parameter in sorting)
+)
+{
+  if (has_na){
+    IndexT i,nNA = 0 ;
+    if (decreasing){
+    // descending: NAs were sorted to the right end; count them
+    for (i=n-1; i>=0; i--){
+      if (ISNA_INTEGER64(data[i]))
+      nNA++;
+    else
+      break;
+    }
+    if (!na_last){
+      // shift non-NA block right, then write NAs at the front
+      for (;i>=0; i--)
+      data[i+nNA] = data[i];
+      for (i=nNA-1;i>=0; i--)
+      data[i] = NA_INTEGER64;
+    }
+  }else{
+    // ascending: NAs were sorted to the left end; count them
+    for (i=0; i<n; i++){
+      if (ISNA_INTEGER64(data[i]))
+      nNA++;
+    else
+      break;
+    }
+    if (na_last){
+      // shift non-NA block left, then write NAs at the back
+      for (;i<n; i++)
+      data[i-nNA] = data[i];
+      for (i=n-nNA;i<n; i++)
+      data[i] = NA_INTEGER64;
+    }
+  }
+  return nNA;
+  }else{
+    return 0;
+  }
+}
+
+// Post-sortorder NA repair: like ram_integer64_fixsortNA but keeps the index
+// vector aligned with data while moving the NA block to the requested end.
+// The NA positions' original indices are parked in auxindex (R_alloc'ed here
+// if the caller passed NULL) and restored after the shift. Returns NA count.
+int ram_integer64_fixsortorderNA(
+  ValueT *data      // RETURNED: pointer to data vector
+, IndexT *index      // RETURNED: pointer to index vector
+, IndexT n           // length of vectors
+, int has_na         // 0 for pure doubles, 1 if NA or NaN can be present
+, int na_last        // 0 for placing NA NaN left, 1 for placing NA NaN right
+, int decreasing     // 0 for ascending, 1 for descending (must match the same parameter in sorting)
+, IndexT *auxindex  // MODIFIED: pointer to auxilliary index vector
+)
+{
+  if (has_na){
+    IndexT i,offset, nNA = 0 ;
+    if (decreasing){
+    // descending: NA block is at the right end
+    for (i=n-1; i>=0; i--){
+      if (ISNA_INTEGER64(data[i]))
+      nNA++;
+    else
+      break;
+    }
+    if (!na_last){
+      if (!auxindex)
+         auxindex = (IndexT *) R_alloc(nNA, sizeof(IndexT));
+       offset = n-nNA;
+       // save the NA positions' indices, shift data+index right, restore NAs in front
+       for (i=nNA-1;i>=0;i--)
+         auxindex[i] = index[offset + i];
+       for (i=offset-1;i>=0;i--){
+       index[i+nNA] = index[i];
+       data[i+nNA] = data[i];
+       }
+       for (i=nNA-1;i>=0;i--){
+         index[i] = auxindex[i];
+       data[i] = NA_INTEGER64;
+       }
+    }
+  }else{
+    // ascending: NA block is at the left end
+    for (i=0; i<n; i++){
+      if (ISNA_INTEGER64(data[i]))
+      nNA++;
+    else
+      break;
+    }
+    if (na_last){
+      if (!auxindex)
+         auxindex = (IndexT *) R_alloc(nNA, sizeof(IndexT));
+       // save the NA positions' indices, shift data+index left, restore NAs at back
+       for (i=0;i<nNA;i++)
+         auxindex[i] = index[i];
+       for (i=nNA;i<n; i++){
+       index[i-nNA] = index[i];
+       data[i-nNA] = data[i];
+       }
+       offset = n-nNA;
+       for (i=offset;i<n; i++){
+         index[i] = auxindex[i-offset];
+       data[i] = NA_INTEGER64;
+       }
+    }
+  }
+  return nNA;
+  }else{
+    return 0;
+  }
+}
+
+// Post-ordering NA repair: data is untouched (accessed only through index),
+// the permutation in index is rotated so the NA block moves to the requested
+// end while preserving the relative order of indices. Returns NA count.
+int ram_integer64_fixorderNA(
+  ValueT *data          // UNCHANGED: pointer to data vector
+, IndexT *index         // RETURNED: pointer to index vector
+, IndexT n              // length of vectors
+, int has_na            // 0 for pure doubles, 1 if NA or NaN can be present
+, int na_last           // 0 for placing NA NaN left, 1 for placing NA NaN right
+, int decreasing        // 0 for ascending, 1 for descending (must match the same parameter in sorting)
+, IndexT *auxindex      // MODIFIED: pointer to auxilliary index vector
+)
+{
+  if (has_na){
+    IndexT i,offset, nNA = 0 ;
+    if (decreasing){
+    // descending: index positions of NAs are at the right end
+    for (i=n-1; i>=0; i--){
+      if (ISNA_INTEGER64(data[index[i]]))
+      nNA++;
+    else
+      break;
+    }
+    if (!na_last){
+      if (!auxindex)
+         auxindex = (IndexT *) R_alloc(nNA, sizeof(IndexT));
+       offset = n-nNA;
+       // rotate the NA indices from the back of index to the front
+       for (i=nNA-1;i>=0;i--)
+         auxindex[i] = index[offset + i];
+       for (i=offset-1;i>=0;i--){
+       index[i+nNA] = index[i];
+       }
+       for (i=nNA-1;i>=0;i--){
+         index[i] = auxindex[i];
+       }
+    }
+  }else{
+    // ascending: index positions of NAs are at the left end
+    for (i=0; i<n; i++){
+      if (ISNA_INTEGER64(data[index[i]]))
+      nNA++;
+    else
+      break;
+    }
+    if (na_last){
+      if (!auxindex)
+         auxindex = (IndexT *) R_alloc(nNA, sizeof(IndexT));
+       // rotate the NA indices from the front of index to the back
+       for (i=0;i<nNA;i++)
+         auxindex[i] = index[i];
+       for (i=nNA;i<n; i++)
+       index[i-nNA] = index[i];
+       offset = n-nNA;
+       for (i=offset;i<n; i++)
+         index[i] = auxindex[i-offset];
+    }
+  }
+  return nNA;
+  }else{
+    return 0;
+  }
+}
+
+
+
+/* } === NA handling for integer64 ================================================ */
+       
+       
+/* { === pure C stable insertion sort for integer64 ================================================ */
+
+// Ascending stable insertion sort of data[l..r] (inclusive bounds).
+// The first loop bubbles the minimum to position l as a sentinel, so the
+// inner while of the main loop needs no lower-bound check (classic
+// Sedgewick guarded insertion sort). COMPEXCH/MOVE/LESS come from the header.
+void ram_integer64_insertionsort_asc(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+)
+{
+  IndexT i;
+  ValueT t;
+  for (i=r;i>l;i--){
+    COMPEXCH(data[i-1], data[i], t)
+  }
+  for (i=l+2;i<=r;i++){
+    IndexT j=i;
+    ValueT v;
+    MOVE(v, data[i])
+    while (LESS(v,data[j-1])){
+      MOVE(data[j], data[j-1]) 
+    j--;
+    }
+    MOVE(data[j], v)
+  }
+}
+
+// Ascending stable insertion sort of data[l..r] that mirrors every data move
+// on the parallel index vector (sort + order in one pass). Sentinel setup as
+// in ram_integer64_insertionsort_asc.
+void ram_integer64_insertionsortorder_asc(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+)
+{
+  IndexT i, ti;
+  ValueT t;
+  for (i=r;i>l;i--){
+    COMPEXCHi(data[i-1], data[i], t, index[i-1], index[i], ti)
+  }
+  for (i=l+2;i<=r;i++){
+    IndexT j=i, vi;
+    ValueT v;
+    MOVE(vi, index[i])
+    MOVE(v, data[i])
+    while (LESS(v,data[j-1])){
+      MOVE(index[j], index[j-1])
+      MOVE(data[j], data[j-1]) 
+    j--;
+    }
+    MOVE(index[j], vi)
+    MOVE(data[j], v)
+  }
+}
+
+// Ascending stable insertion ORDERING: data stays untouched; only the index
+// permutation is sorted, comparing data[index[...]] (KEYCOMPEXCH sentinel
+// setup, then guarded insertion on the index vector).
+void ram_integer64_insertionorder_asc(
+  ValueT *data    // UNCHANGED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+)
+{
+  IndexT i, ti;
+  for (i=r;i>l;i--){
+    KEYCOMPEXCH(index[i-1], index[i], ti)
+  }
+  for (i=l+2;i<=r;i++){
+    IndexT j=i, vi;
+    ValueT v;
+    MOVE(vi, index[i])
+    MOVE(v, data[vi])
+    while (LESS(v,data[index[j-1]])){
+      MOVE(index[j], index[j-1])
+    j--;
+    }
+    MOVE(index[j], vi)
+  }
+}
+
+
+
+// Descending stable insertion sort of data[l..r]: mirror image of the
+// ascending variant — the maximum is bubbled to position r as the sentinel
+// and insertion proceeds right-to-left.
+void ram_integer64_insertionsort_desc(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+)
+{
+  IndexT i;
+  ValueT t;
+  for (i=l;i<r;i++){
+    COMPEXCH(data[i+1], data[i], t)
+  }
+  for (i=r-2;i>=l;i--){
+    IndexT j=i;
+    ValueT v;
+    MOVE(v, data[i])
+    while (LESS(v,data[j+1])){
+      MOVE(data[j], data[j+1]) j++;
+    }
+    MOVE(data[j], v)
+  }
+}
+
+// Descending stable insertion sort of data[l..r] with the parallel index
+// vector kept in sync (sort + order in one pass), mirror image of the
+// ascending sortorder variant.
+void ram_integer64_insertionsortorder_desc(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+)
+{
+  IndexT i, ti;
+  ValueT t;
+  for (i=l;i<r;i++){
+    COMPEXCHi(data[i+1], data[i], t, index[i+1], index[i], ti)
+  }
+  for (i=r-2;i>=l;i--){
+    IndexT j=i, vi;
+    ValueT v;
+    MOVE(vi, index[i])
+    MOVE(v, data[i])
+    while (LESS(v,data[j+1])){
+      MOVE(index[j], index[j+1]) 
+      MOVE(data[j], data[j+1]) 
+    j++;
+    }
+    MOVE(index[j], vi)
+    MOVE(data[j], v)
+  }
+}
+
+// Descending stable insertion ORDERING: data untouched, only the index
+// permutation is rearranged, comparing data[index[...]].
+void ram_integer64_insertionorder_desc(
+  ValueT *data    // UNCHANGED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+)
+{
+  IndexT i, ti;
+  for (i=l;i<r;i++){
+    KEYCOMPEXCH(index[i+1], index[i], ti)
+  }
+  for (i=r-2;i>=l;i--){
+    IndexT j=i, vi;
+    ValueT v;
+    MOVE(vi, index[i])
+    MOVE(v, data[vi])
+    while (LESS(v,data[index[j+1]])){
+      MOVE(index[j], index[j+1]) 
+    j++;
+    }
+    MOVE(index[j], vi)
+  }
+}
+
+/* } === pure C stable insertion sort for integer64 ================================================ */
+
+
+
+
+
+/* { === pure C stable shell sort for integer64 ================================================ */
+
+
+// Ascending shellsort of data[l..r] using the global increment table
+// shellincs[] (SHELLARRAYSIZE entries, decreasing, defined elsewhere).
+void ram_integer64_shellsort_asc(ValueT *data, IndexT l, IndexT r)
+{
+    ValueT v;
+    IndexT i, j, h, lh, t, n=r-l+1;
+    // skip increments larger than the range length
+    for (t = 0; shellincs[t] > n; t++);
+    // h-sort the range with every remaining increment down the table
+    for (h = shellincs[t]; t < SHELLARRAYSIZE; h = shellincs[++t]){
+      lh = l+h;
+      for (i = lh; i <= r; i++) {
+      MOVE(v, data[i])
+          j = i;
+          while (j >= lh && LESS(v, data[j - h])){
+        MOVE(data[j], data[j - h])
+            j -= h;
+          }
+      MOVE(data[j], v)
+      }
+    }
+}
+// Descending shellsort of data[l..r]; identical to the ascending variant
+// except the LESS comparison is reversed.
+void ram_integer64_shellsort_desc(ValueT *data, IndexT l, IndexT r)
+{
+    ValueT v;
+    IndexT i, j, h, lh, t, n=r-l+1;
+    // skip increments larger than the range length
+    for (t = 0; shellincs[t] > n; t++);
+    for (h = shellincs[t]; t < SHELLARRAYSIZE; h = shellincs[++t]){
+      lh = l+h;
+      for (i = lh; i <= r; i++) {
+      MOVE(v, data[i])
+          j = i;
+          while (j >= lh && LESS(data[j - h], v)){
+        MOVE(data[j], data[j - h])
+            j -= h;
+          }
+      MOVE(data[j], v)
+      }
+    }
+}
+
+// Ascending shellsort of data[l..r] applying the same permutation to
+// index[l..r] (both vectors returned reordered).
+void ram_integer64_shellsortorder_asc(ValueT *data, IndexT *index, IndexT l, IndexT r)
+{
+    ValueT v;
+    IndexT vi, i, j, h, lh, t, n=r-l+1;
+    // skip increments larger than the range length
+    for (t = 0; shellincs[t] > n; t++);
+    for (h = shellincs[t]; t < SHELLARRAYSIZE; h = shellincs[++t]){
+      lh = l+h;
+      for (i = lh; i <= r; i++) {
+      MOVE(vi, index[i])
+      MOVE(v, data[i])
+          j = i;
+          while (j >= lh && LESS(v, data[j - h])){
+        MOVE(index[j], index[j - h])
+        MOVE(data[j], data[j - h])
+            j -= h;
+          }
+      MOVE(index[j], vi)
+      MOVE(data[j], v)
+      }
+    }
+}
+// Descending shellsort of data[l..r] applying the same permutation to
+// index[l..r]; comparison reversed relative to the ascending variant.
+void ram_integer64_shellsortorder_desc(ValueT *data, IndexT *index, IndexT l, IndexT r)
+{
+    ValueT v;
+    IndexT vi, i, j, h, lh, t, n=r-l+1;
+    // skip increments larger than the range length
+    for (t = 0; shellincs[t] > n; t++);
+    for (h = shellincs[t]; t < SHELLARRAYSIZE; h = shellincs[++t]){
+      lh = l+h;
+      for (i = lh; i <= r; i++) {
+      MOVE(vi, index[i])
+      MOVE(v, data[i])
+          j = i;
+          while (j >= lh && LESS(data[j - h], v)){
+        MOVE(index[j], index[j - h])
+        MOVE(data[j], data[j - h])
+            j -= h;
+          }
+      MOVE(index[j], vi)
+      MOVE(data[j], v)
+      }
+    }
+}
+
+// Ascending shell ordering: permutes index[l..r] so data[index[]] is
+// ascending; data itself is left unchanged.
+void ram_integer64_shellorder_asc(ValueT *data, IndexT *index, IndexT l, IndexT r)
+{
+    ValueT v;
+    IndexT vi, i, j, h, lh, t, n=r-l+1;
+    // skip increments larger than the range length
+    for (t = 0; shellincs[t] > n; t++);
+    for (h = shellincs[t]; t < SHELLARRAYSIZE; h = shellincs[++t]){
+      lh = l+h;
+      for (i = lh; i <= r; i++) {
+      MOVE(vi, index[i])
+      MOVE(v, data[vi])
+          j = i;
+          while (j >= lh && LESS(v, data[index[j - h]])){
+        MOVE(index[j], index[j - h])
+            j -= h;
+          }
+      MOVE(index[j], vi)
+      }
+    }
+}
+// Descending shell ordering: permutes index[l..r] so data[index[]] is
+// descending; data itself is left unchanged.
+void ram_integer64_shellorder_desc(ValueT *data, IndexT *index, IndexT l, IndexT r)
+{
+    ValueT v;
+    IndexT vi, i, j, h, lh, t, n=r-l+1;
+    // skip increments larger than the range length
+    for (t = 0; shellincs[t] > n; t++);
+    for (h = shellincs[t]; t < SHELLARRAYSIZE; h = shellincs[++t]){
+      lh = l+h;
+      for (i = lh; i <= r; i++) {
+      MOVE(vi, index[i])
+      MOVE(v, data[vi])
+          j = i;
+          while (j >= lh && LESS(data[index[j - h]], v)){
+        MOVE(index[j], index[j - h])
+            j -= h;
+          }
+      MOVE(index[j], vi)
+      }
+    }
+}
+
+
+/* } === pure C stable shell sort for integer64 ================================================ */
+
+
+
+
+/* { === pure C stable merge sort for integer64 ================================================ */
+
+/* Sedgewick 8.1 Merging
+   stable merge c=a+b where na=len(a) and nb=len(b) */
+
+// Ascending stable merge c = a+b (Sedgewick 8.1).  b is taken only on
+// strict LESS, so on ties a's element is emitted first (stability).
+void ram_integer64_sortmerge_asc(
+  ValueT *c   // pointer to merge target data vector
+, ValueT *a   // pointer to merge source data vector a
+, ValueT *b   // pointer to merge source data vector b
+, IndexT na   // number of elements in merge source vector a
+, IndexT nb   // number of elements in merge source vector b
+)
+{
+  IndexT i,j,k,K=na+nb;
+  for (i=0,j=0,k=0;k<K;k++){
+    if (i==na){
+      // a exhausted: copy the remainder of b
+      for (;k<K;k++)
+        MOVE(c[k],b[j++])
+      break;
+    }
+    if (j==nb){
+      // b exhausted: copy the remainder of a
+      for (;k<K;k++)
+        MOVE(c[k],a[i++])
+      break;
+    }
+  if (LESS(b[j],a[i]))
+    MOVE(c[k],b[j++])
+  else
+    MOVE(c[k],a[i++])
+  }
+}
+
+// Ascending stable merge of two index vectors c = a+b, comparing through
+// data via KEYLESS (data itself is not moved).
+void ram_integer64_ordermerge_asc(
+ValueT *data  // data vector
+, IndexT *c   // pointer to merge target index vector
+, IndexT *a   // pointer to merge source index vector a
+, IndexT *b   // pointer to merge source index vector b
+, IndexT na   // number of elements in merge source vector a
+, IndexT nb   // number of elements in merge source vector b
+)
+{
+  IndexT i,j,k,K=na+nb;
+  for (i=0,j=0,k=0;k<K;k++){
+    if (i==na){
+      // a exhausted: copy the remainder of b
+      for (;k<K;k++)
+        MOVE(c[k],b[j++])
+      break;
+    }
+    if (j==nb){
+      // b exhausted: copy the remainder of a
+      for (;k<K;k++)
+        MOVE(c[k],a[i++])
+      break;
+    }
+  if (KEYLESS(b[j],a[i]))
+    MOVE(c[k],b[j++])
+  else
+    MOVE(c[k],a[i++])
+  }
+}
+
+// Ascending stable merge of data vectors c = a+b with the index vectors
+// ci = ai+bi moved in lockstep (sortordering variant).
+void ram_integer64_sortordermerge_asc(
+  ValueT *c     // pointer to merge target data vector
+, ValueT *a     // pointer to merge source data vector a
+, ValueT *b     // pointer to merge source data vector b
+, IndexT *ci    // pointer to merge target index vector
+, IndexT *ai    // pointer to merge source index vector a
+, IndexT *bi    // pointer to merge source index vector b
+, IndexT na     // number of elements in merge source vector a
+, IndexT nb     // number of elements in merge source vector b
+)
+{
+  IndexT i,j,k,K=na+nb;
+  for (i=0,j=0,k=0;k<K;k++){
+    if (i==na){
+      // a exhausted: copy the remainder of b (index first, then data,
+      // because the j++ must happen exactly once per element)
+      for (;k<K;k++){
+        MOVE(ci[k],bi[j])
+        MOVE(c[k],b[j++])
+    }
+      break;
+    }
+    if (j==nb){
+      // b exhausted: copy the remainder of a
+      for (;k<K;k++){
+        MOVE(ci[k],ai[i])
+        MOVE(c[k],a[i++])
+    }
+      break;
+    }
+  if (LESS(b[j],a[i])){
+    MOVE(ci[k],bi[j])
+    MOVE(c[k],b[j++])
+  }else{
+    MOVE(ci[k],ai[i])
+    MOVE(c[k],a[i++])
+  }
+  }
+}
+
+// Descending stable merge c = a+b, filled from the back: the smaller of the
+// two tail elements is placed last; ties are drawn from b at the back, so
+// equal elements of a still precede those of b in c (stability).
+void ram_integer64_sortmerge_desc(ValueT *c, ValueT *a, ValueT *b, IndexT na, IndexT nb)
+{
+  IndexT i,j,k,K=na+nb-1;
+  for (i=na-1,j=nb-1,k=K;k>=0;k--){
+    if (i<0){
+      // a exhausted: copy the remainder of b
+      for (;k>=0;k--)
+        MOVE(c[k],b[j--])
+      break;
+    }
+    if (j<0){
+      // b exhausted: copy the remainder of a
+      for (;k>=0;k--)
+        MOVE(c[k],a[i--])
+      break;
+    }
+  if (LESS(a[i],b[j]))
+    MOVE(c[k],a[i--])
+  else
+    MOVE(c[k],b[j--])
+  }
+}
+
+// Descending stable merge of two index vectors c = a+b, filled from the
+// back and comparing through data via KEYLESS; data is not moved.
+void ram_integer64_ordermerge_desc(ValueT *data, IndexT *c, IndexT *a, IndexT *b, IndexT na, IndexT nb)
+{
+  IndexT i,j,k,K=na+nb-1;
+  for (i=na-1,j=nb-1,k=K;k>=0;k--){
+    if (i<0){
+      // a exhausted: copy the remainder of b
+      for (;k>=0;k--)
+        MOVE(c[k],b[j--])
+      break;
+    }
+    if (j<0){
+      // b exhausted: copy the remainder of a
+      for (;k>=0;k--)
+        MOVE(c[k],a[i--])
+      break;
+    }
+  if (KEYLESS(a[i],b[j]))
+    MOVE(c[k],a[i--])
+  else
+    MOVE(c[k],b[j--])
+  }
+}
+
+// Descending stable merge of data vectors c = a+b with the index vectors
+// ci = ai+bi moved in lockstep, filled from the back.
+void ram_integer64_sortordermerge_desc(ValueT *c, ValueT *a, ValueT *b, IndexT *ci, IndexT *ai, IndexT *bi, IndexT na, IndexT nb)
+{
+  IndexT i,j,k,K=na+nb-1;
+  for (i=na-1,j=nb-1,k=K;k>=0;k--){
+    if (i<0){
+      // a exhausted: copy the remainder of b
+      for (;k>=0;k--){
+        MOVE(ci[k],bi[j])
+        MOVE(c[k],b[j--])
+    }
+      break;
+    }
+    if (j<0){
+      // b exhausted: copy the remainder of a
+      for (;k>=0;k--){
+        MOVE(ci[k],ai[i])
+        MOVE(c[k],a[i--])
+    }
+      break;
+    }
+  if (LESS(a[i],b[j])){
+    MOVE(ci[k],ai[i])
+    MOVE(c[k],a[i--])
+  }else{
+    MOVE(ci[k],bi[j])
+    MOVE(c[k],b[j--])
+  }
+  }
+}
+
+
+// Recursive ascending mergesort (Sedgewick 8.4, "mergesort with no copying"):
+// sorts b into a, swapping the roles of a and b at each recursion level so no
+// extra copy step is needed.  NOTE(review): presumably the caller must
+// initialize b as a copy of a before the first call -- confirm at call site.
+void ram_integer64_mergesort_asc_rec(
+  ValueT *a   // pointer to target data vector 
+, ValueT *b   // pointer to source data vector 
+, IndexT l    // leftmost position to be sorted
+, IndexT r    // rightmost position to be sorted
+)
+{
+  IndexT m;
+  // small ranges: finish with insertion sort directly in a
+  if (r-l <= INSERTIONSORT_LIMIT_MERGE){
+    ram_integer64_insertionsort_asc(a, l, r);
+    return;
+  }
+  m = (l+r)/2;
+  ram_integer64_mergesort_asc_rec(b, a, l, m);
+  ram_integer64_mergesort_asc_rec(b, a, m+1, r);
+  ram_integer64_sortmerge_asc(a+l, b+l, b+m+1, m-l+1, r-m);
+}
+// Recursive ascending merge-ordering (Sedgewick 8.4 no-copy scheme applied
+// to index vectors): orders index vector b into a; data is not moved.
+// NOTE(review): presumably b must start as a copy of a -- confirm at call site.
+void ram_integer64_mergeorder_asc_rec(
+ValueT *data  // pointer to data vector
+, IndexT *a   // pointer to target index vector 
+, IndexT *b   // pointer to source index vector 
+, IndexT l    // leftmost position to be sorted
+, IndexT r    // rightmost position to be sorted
+)
+{
+  IndexT m;
+  // small ranges: finish with insertion ordering directly in a
+  if (r-l <= INSERTIONSORT_LIMIT_MERGE){
+    ram_integer64_insertionorder_asc(data, a, l, r);
+    return;
+  }
+  m = (l+r)/2;
+  ram_integer64_mergeorder_asc_rec(data, b, a, l, m);
+  ram_integer64_mergeorder_asc_rec(data, b, a, m+1, r);
+  ram_integer64_ordermerge_asc(data, a+l, b+l, b+m+1, m-l+1, r-m);
+}
+// Recursive ascending merge-sortordering (Sedgewick 8.4 no-copy scheme):
+// sorts data b into a and carries the index vectors bi/ai along.
+// NOTE(review): presumably b/bi must start as copies of a/ai -- confirm.
+void ram_integer64_mergesortorder_asc_rec(
+  ValueT *a   // pointer to target data vector
+, ValueT *b   // pointer to source data vector
+, IndexT *ai  // pointer to target index vector 
+, IndexT *bi  // pointer to source index vector 
+, IndexT l    // leftmost position to be sorted
+, IndexT r    // rightmost position to be sorted
+)
+{
+  IndexT m;
+  // small ranges: finish with insertion sortordering directly in a/ai
+  if (r-l <= INSERTIONSORT_LIMIT_MERGE){
+    ram_integer64_insertionsortorder_asc(a, ai, l, r);
+    return;
+  }
+  m = (l+r)/2;
+  ram_integer64_mergesortorder_asc_rec(b, a, bi, ai, l, m);
+  ram_integer64_mergesortorder_asc_rec(b, a, bi, ai, m+1, r);
+  ram_integer64_sortordermerge_asc(a+l, b+l, b+m+1, ai+l, bi+l, bi+m+1, m-l+1, r-m);
+}
+
+
+// Descending counterpart of ram_integer64_mergesort_asc_rec (Sedgewick 8.4
+// no-copy mergesort; a/b roles swap per recursion level).
+void ram_integer64_mergesort_desc_rec(ValueT *a, ValueT *b, IndexT l, IndexT r)
+{
+  IndexT m;
+  // small ranges: finish with insertion sort directly in a
+  if (r-l <= INSERTIONSORT_LIMIT_MERGE){
+    ram_integer64_insertionsort_desc(a, l, r);
+    return;
+  }
+  m = (l+r)/2;
+  ram_integer64_mergesort_desc_rec(b, a, l, m);
+  ram_integer64_mergesort_desc_rec(b, a, m+1, r);
+  ram_integer64_sortmerge_desc(a+l, b+l, b+m+1, m-l+1, r-m);
+}
+
+// Descending counterpart of ram_integer64_mergeorder_asc_rec: orders index
+// vector b into a; data is not moved.
+void ram_integer64_mergeorder_desc_rec(ValueT *data, IndexT *a, IndexT *b, IndexT l, IndexT r)
+{
+  IndexT m;
+  // small ranges: finish with insertion ordering directly in a
+  if (r-l <= INSERTIONSORT_LIMIT_MERGE){
+    ram_integer64_insertionorder_desc(data, a, l, r);
+    return;
+  }
+  m = (l+r)/2;
+  ram_integer64_mergeorder_desc_rec(data, b, a, l, m);
+  ram_integer64_mergeorder_desc_rec(data, b, a, m+1, r);
+  ram_integer64_ordermerge_desc(data, a+l, b+l, b+m+1, m-l+1, r-m);
+}
+
+// Descending counterpart of ram_integer64_mergesortorder_asc_rec: sorts
+// data b into a carrying the index vectors bi/ai along.
+void ram_integer64_mergesortorder_desc_rec(ValueT *a, ValueT *b, IndexT *ai, IndexT *bi, IndexT l, IndexT r)
+{
+  IndexT m;
+  // small ranges: finish with insertion sortordering directly in a/ai
+  if (r-l <= INSERTIONSORT_LIMIT_MERGE){
+    ram_integer64_insertionsortorder_desc(a, ai, l, r);
+    return;
+  }
+  m = (l+r)/2;
+  ram_integer64_mergesortorder_desc_rec(b, a, bi, ai, l, m);
+  ram_integer64_mergesortorder_desc_rec(b, a, bi, ai, m+1, r);
+  ram_integer64_sortordermerge_desc(a+l, b+l, b+m+1, ai+l, bi+l, bi+m+1, m-l+1, r-m);
+}
+
+
+
+
+
+/* } === pure C stable merge sort for integer64 ================================================ */
+
+
+// Ascending quicksort partition of data[l..r] around the pivot in data[r]
+// (crossing-pointer scan with explicit stop conditions instead of sentinel
+// elements at the array ends); returns the final pivot position.
+IndexT ram_integer64_quicksortpart_asc_no_sentinels(
+ValueT *data    // pointer to data
+, IndexT l      // leftmost position to be sorted
+, IndexT r      // rightmost position to be sorted
+)
+{
+  IndexT i = l-1, j = r;
+  ValueT t,v;
+  MOVE(v, data[r])
+  for (;;){
+    ++i; while(LESS(data[i], v)){if (j<=i)break; ++i;}; // explicit stop condition
+    --j; while(LESS(v, data[j])){if (j<=i)break; --j;};  // explicit stop condition
+    if (j<=i)break;
+    EXCH(data[i], data[j], t)
+  }
+  // place the pivot between the two partitions
+  EXCH(data[i], data[r], t)
+  return i;
+}
+// Descending quicksort partition of data[l..r] around the pivot in data[r];
+// the two LESS comparisons are swapped relative to the ascending variant.
+IndexT ram_integer64_quicksortpart_desc_no_sentinels(ValueT *data, IndexT l, IndexT r){
+  IndexT i = l-1, j = r;
+  ValueT t,v;
+  MOVE(v, data[r])
+  for (;;){
+    ++i; while(LESS(v, data[i])){if (j<=i)break; ++i;}; // explicit stop condition
+    --j; while(LESS(data[j], v)){if (j<=i)break; --j;};  // explicit stop condition
+    if (j<=i)break;
+    EXCH(data[i], data[j], t)
+  }
+  // place the pivot between the two partitions
+  EXCH(data[i], data[r], t)
+  return i;
+}
+// Ascending partition of data[l..r] around the pivot in data[r], swapping
+// index[] in lockstep with data[]; returns the final pivot position.
+IndexT ram_integer64_quicksortorderpart_asc_no_sentinels(ValueT *data, IndexT *index, IndexT l, IndexT r){
+  IndexT ti, i = l-1, j = r;
+  ValueT t,v;
+  MOVE(v, data[r])
+  for (;;){
+    ++i; while(LESS(data[i], v)){if (j<=i)break; ++i;}; // explicit stop condition
+    --j; while(LESS(v, data[j])){if (j<=i)break; --j;};  // explicit stop condition
+    if (j<=i)break;
+    EXCH(index[i], index[j], ti)
+    EXCH(data[i], data[j], t)
+  }
+  // place pivot (and its index entry) between the two partitions
+  EXCH(index[i], index[r], ti)
+  EXCH(data[i], data[r], t)
+  return i;
+}
+// Descending partition of data[l..r] around the pivot in data[r], swapping
+// index[] in lockstep with data[]; returns the final pivot position.
+IndexT ram_integer64_quicksortorderpart_desc_no_sentinels(ValueT *data, IndexT *index, IndexT l, IndexT r){
+  IndexT ti,i = l-1, j = r;
+  ValueT t,v;
+  MOVE(v, data[r])
+  for (;;){
+    ++i; while(LESS(v, data[i])){if (j<=i)break; ++i;}; // explicit stop condition
+    --j; while(LESS(data[j], v)){if (j<=i)break; --j;};  // explicit stop condition
+    if (j<=i)break;
+    EXCH(index[i], index[j], ti)
+    EXCH(data[i], data[j], t)
+  }
+  // place pivot (and its index entry) between the two partitions
+  EXCH(index[i], index[r], ti)
+  EXCH(data[i], data[r], t)
+  return i;
+}
+// Ascending order-only partition: permutes index[l..r] around the pivot
+// value data[index[r]], comparing through the index; data is not moved.
+IndexT ram_integer64_quickorderpart_asc_no_sentinels(ValueT *data, IndexT *index, IndexT l, IndexT r){
+  IndexT ti,i = l-1, j = r;
+  ValueT v;
+  MOVE(v, data[index[r]])
+  for (;;){
+    ++i; while(LESS(data[index[i]], v)){if (j<=i)break; ++i;}; // explicit stop condition
+    --j; while(LESS(v, data[index[j]])){if (j<=i)break; --j;};  // explicit stop condition
+    if (j<=i)break;
+    EXCH(index[i], index[j], ti)
+  }
+  // place the pivot's index entry between the two partitions
+  EXCH(index[i], index[r], ti)
+  return i;
+}
+// Descending order-only partition: permutes index[l..r] around the pivot
+// value data[index[r]], comparing through the index; data is not moved.
+IndexT ram_integer64_quickorderpart_desc_no_sentinels(ValueT *data, IndexT *index, IndexT l, IndexT r){
+  IndexT ti, i = l-1, j = r;
+  ValueT v;
+  MOVE(v, data[index[r]])
+  for (;;){
+    ++i; while(LESS(v, data[index[i]])){if (j<=i)break; ++i;}; // explicit stop condition
+    --j; while(LESS(data[index[j]], v)){if (j<=i)break; --j;};  // explicit stop condition
+    if (j<=i)break;
+    EXCH(index[i], index[j], ti)
+  }
+  // place the pivot's index entry between the two partitions
+  EXCH(index[i], index[r], ti)
+  return i;
+}
+
+
+
+
+// Ascending quicksort with randomized median-of-3 pivot selection
+// (randIndex and ram_integer64_median3 are defined elsewhere) and insertion
+// sort for ranges at or below INSERTIONSORT_LIMIT_QUICK.
+void ram_integer64_quicksort_asc_mdr3_no_sentinels(
+ValueT *data
+, IndexT l, IndexT r
+){
+  if (INSERTIONSORT_LIMIT_QUICK < r-l){
+    ValueT t;
+      IndexT m=(l+r)/2;
+      // median of middle element and two randomized flanking positions
+      m = ram_integer64_median3(data, l+randIndex((r-l)/2), m, r-randIndex((r-l)/2));
+      EXCH(data[m], data[r], t)  // move the pivot to r for the partition step
+      m = ram_integer64_quicksortpart_asc_no_sentinels(data, l, r);
+      ram_integer64_quicksort_asc_mdr3_no_sentinels(data, l, m-1);
+      ram_integer64_quicksort_asc_mdr3_no_sentinels(data, m+1, r);
+  }
+  else  ram_integer64_insertionsort_asc(data, l, r); 
+}
+// Ascending quicksort with randomized median-of-3 pivot, carrying index[]
+// along with data[] (sortordering variant).
+void ram_integer64_quicksortorder_asc_mdr3_no_sentinels(ValueT *data, IndexT *index, IndexT l, IndexT r){
+  if (INSERTIONSORT_LIMIT_QUICK < r-l){
+    ValueT t;
+      IndexT ti, m=(l+r)/2;
+      m = ram_integer64_median3(data, l+randIndex((r-l)/2), m, r-randIndex((r-l)/2));
+    EXCH(index[m], index[r], ti)
+    EXCH(data[m], data[r], t)
+      m = ram_integer64_quicksortorderpart_asc_no_sentinels(data, index, l, r);
+      ram_integer64_quicksortorder_asc_mdr3_no_sentinels(data, index, l, m-1);
+      ram_integer64_quicksortorder_asc_mdr3_no_sentinels(data, index, m+1, r);
+  }
+  else  ram_integer64_insertionsortorder_asc(data, index, l, r); 
+}
+// Ascending quick-ordering with randomized median-of-3 pivot.
+// NOTE(review): unlike ram_integer64_quickorder_asc_intro (which uses
+// ram_integer64_median3index and swaps only index[]), this variant picks the
+// median over raw data positions and swaps entries of data[] itself, so data
+// does not stay unchanged -- verify this is intended or that this variant is
+// unused before relying on it for ordering.
+void ram_integer64_quickorder_asc_mdr3_no_sentinels(ValueT *data, IndexT *index, IndexT l, IndexT r){
+  if (INSERTIONSORT_LIMIT_QUICK < r-l){
+    ValueT t;
+      IndexT ti, m=(l+r)/2;
+      m = ram_integer64_median3(data, l+randIndex((r-l)/2), m, r-randIndex((r-l)/2));
+    EXCH(index[m], index[r], ti)
+    EXCH(data[m], data[r], t)
+      m = ram_integer64_quickorderpart_asc_no_sentinels(data, index, l, r);
+      ram_integer64_quickorder_asc_mdr3_no_sentinels(data, index, l, m-1);
+      ram_integer64_quickorder_asc_mdr3_no_sentinels(data, index, m+1, r);
+  }
+  else  ram_integer64_insertionorder_asc(data, index, l, r); 
+}
+
+// Descending quicksort with randomized median-of-3 pivot selection and
+// insertion sort for ranges at or below INSERTIONSORT_LIMIT_QUICK.
+void ram_integer64_quicksort_desc_mdr3_no_sentinels(ValueT *data, IndexT l, IndexT r){
+  if (INSERTIONSORT_LIMIT_QUICK < r-l){
+    ValueT t;
+      IndexT m=(l+r)/2;
+      m = ram_integer64_median3(data, l+randIndex((r-l)/2), m, r-randIndex((r-l)/2));
+    EXCH(data[m], data[r], t)
+      m = ram_integer64_quicksortpart_desc_no_sentinels(data, l, r);
+      ram_integer64_quicksort_desc_mdr3_no_sentinels(data, l, m-1);
+      ram_integer64_quicksort_desc_mdr3_no_sentinels(data, m+1, r);
+  }
+  else  ram_integer64_insertionsort_desc(data, l, r); 
+}
+// Descending quicksort with randomized median-of-3 pivot, carrying index[]
+// along with data[] (sortordering variant).
+void ram_integer64_quicksortorder_desc_mdr3_no_sentinels(ValueT *data, IndexT *index, IndexT l, IndexT r){
+  if (INSERTIONSORT_LIMIT_QUICK < r-l){
+    ValueT t;
+      IndexT ti, m=(l+r)/2;
+      m = ram_integer64_median3(data, l+randIndex((r-l)/2), m, r-randIndex((r-l)/2));
+    EXCH(index[m], index[r], ti)
+    EXCH(data[m], data[r], t)
+      m = ram_integer64_quicksortorderpart_desc_no_sentinels(data, index, l, r);
+      ram_integer64_quicksortorder_desc_mdr3_no_sentinels(data, index, l, m-1);
+      ram_integer64_quicksortorder_desc_mdr3_no_sentinels(data, index, m+1, r);
+  }
+  else  ram_integer64_insertionsortorder_desc(data, index, l, r); 
+}
+// Descending quick-ordering with randomized median-of-3 pivot.
+// NOTE(review): like its ascending twin, this variant swaps entries of
+// data[] itself although order-only routines elsewhere in this file take
+// data as UNCHANGED -- verify this is intended or that this variant is
+// unused before relying on it for ordering.
+void ram_integer64_quickorder_desc_mdr3_no_sentinels(ValueT *data, IndexT *index, IndexT l, IndexT r){
+  if (INSERTIONSORT_LIMIT_QUICK < r-l){
+    ValueT t;
+      IndexT ti, m=(l+r)/2;
+      m = ram_integer64_median3(data, l+randIndex((r-l)/2), m, r-randIndex((r-l)/2));
+    EXCH(index[m], index[r], ti)
+    EXCH(data[m], data[r], t)
+      m = ram_integer64_quickorderpart_desc_no_sentinels(data, index, l, r);
+      ram_integer64_quickorder_desc_mdr3_no_sentinels(data, index, l, m-1);
+      ram_integer64_quickorder_desc_mdr3_no_sentinels(data, index, m+1, r);
+  }
+  else  ram_integer64_insertionorder_desc(data, index, l, r); 
+}
+
+
+// Introspective ascending quicksort: depth-limited quicksort (restlevel
+// counts remaining recursion levels) that falls back to shellsort once the
+// level budget is exhausted, guarding against quadratic worst cases;
+// small ranges use insertion sort.
+void ram_integer64_quicksort_asc_intro(ValueT *data, IndexT l, IndexT r, int restlevel)
+{
+  IndexT m;
+  if (restlevel>0){
+    if (INSERTIONSORT_LIMIT_QUICK < r-l){
+    ValueT t;
+      m=(l+r)/2;
+      // randomized median-of-3 pivot, moved to r for the partition step
+      m = ram_integer64_median3(data, l+randIndex((r-l)/2), m, r-randIndex((r-l)/2));
+    EXCH(data[m], data[r], t)
+      m = ram_integer64_quicksortpart_asc_no_sentinels(data, l, r);
+    restlevel--;
+    ram_integer64_quicksort_asc_intro(data, l, m-1, restlevel);
+    ram_integer64_quicksort_asc_intro(data, m+1, r, restlevel);
+    }
+    else  ram_integer64_insertionsort_asc(data, l, r); 
+  }else{
+  ram_integer64_shellsort_asc(data, l, r);
+  }
+}
+// Introspective ascending quicksort carrying index[] along with data[]
+// (sortordering variant); falls back to shellsortorder when the recursion
+// level budget restlevel is exhausted.
+void ram_integer64_quicksortorder_asc_intro(ValueT *data, IndexT *index, IndexT l, IndexT r, int restlevel)
+{
+  IndexT m;
+  if (restlevel>0){
+    if (INSERTIONSORT_LIMIT_QUICK < r-l){
+    IndexT ti;
+    ValueT t;
+      m=(l+r)/2;
+      m = ram_integer64_median3(data, l+randIndex((r-l)/2), m, r-randIndex((r-l)/2));
+    EXCH(index[m], index[r], ti)
+    EXCH(data[m], data[r], t)
+      m = ram_integer64_quicksortorderpart_asc_no_sentinels(data, index, l, r);
+    restlevel--;
+    ram_integer64_quicksortorder_asc_intro(data, index, l, m-1, restlevel);
+    ram_integer64_quicksortorder_asc_intro(data, index, m+1, r, restlevel);
+    }
+    else  ram_integer64_insertionsortorder_asc(data, index, l, r); 
+  }else{
+  ram_integer64_shellsortorder_asc(data, index, l, r);
+  }
+}
+// Introspective ascending quick-ordering: permutes only index[] (data
+// unchanged); note the pivot is chosen with ram_integer64_median3index,
+// which compares through index[], and only index entries are swapped.
+void ram_integer64_quickorder_asc_intro(ValueT *data, IndexT *index, IndexT l, IndexT r, int restlevel)
+{
+  IndexT m;
+  if (restlevel>0){
+    if (INSERTIONSORT_LIMIT_QUICK < r-l){
+    IndexT ti;
+      m=(l+r)/2;
+      m = ram_integer64_median3index(data, index, l+randIndex((r-l)/2), m, r-randIndex((r-l)/2));
+    EXCH(index[m], index[r], ti)
+      m = ram_integer64_quickorderpart_asc_no_sentinels(data, index, l, r);
+    restlevel--;
+    ram_integer64_quickorder_asc_intro(data, index, l, m-1, restlevel);
+    ram_integer64_quickorder_asc_intro(data, index, m+1, r, restlevel);
+    }
+    else  ram_integer64_insertionorder_asc(data, index, l, r); 
+  }else{
+  ram_integer64_shellorder_asc(data, index, l, r);
+  }
+}
+
+// Introspective descending quicksort; falls back to shellsort when the
+// recursion level budget restlevel is exhausted, insertion sort for small
+// ranges.
+void ram_integer64_quicksort_desc_intro(ValueT *data, IndexT l, IndexT r, int restlevel)
+{
+  IndexT m;
+  if (restlevel>0){
+    if (INSERTIONSORT_LIMIT_QUICK < r-l){
+    ValueT t;
+      m=(l+r)/2;
+      m = ram_integer64_median3(data, l+randIndex((r-l)/2), m, r-randIndex((r-l)/2));
+    EXCH(data[m], data[r], t)
+      m = ram_integer64_quicksortpart_desc_no_sentinels(data, l, r);
+    restlevel--;
+    ram_integer64_quicksort_desc_intro(data, l, m-1, restlevel);
+    ram_integer64_quicksort_desc_intro(data, m+1, r, restlevel);
+    }
+    else  ram_integer64_insertionsort_desc(data, l, r); 
+  }else{
+  ram_integer64_shellsort_desc(data, l, r);
+  }
+}
+// Introspective descending quicksort carrying index[] along with data[]
+// (sortordering variant); shellsortorder fallback at restlevel 0.
+void ram_integer64_quicksortorder_desc_intro(ValueT *data, IndexT *index, IndexT l, IndexT r, int restlevel)
+{
+  IndexT m;
+  if (restlevel>0){
+    if (INSERTIONSORT_LIMIT_QUICK < r-l){
+    IndexT ti;
+    ValueT t;
+      m=(l+r)/2;
+      m = ram_integer64_median3(data, l+randIndex((r-l)/2), m, r-randIndex((r-l)/2));
+    EXCH(index[m], index[r], ti)
+    EXCH(data[m], data[r], t)
+      m = ram_integer64_quicksortorderpart_desc_no_sentinels(data, index, l, r);
+    restlevel--;
+    ram_integer64_quicksortorder_desc_intro(data, index, l, m-1, restlevel);
+    ram_integer64_quicksortorder_desc_intro(data, index, m+1, r, restlevel);
+    }
+    else  ram_integer64_insertionsortorder_desc(data, index, l, r); 
+  }else{
+  ram_integer64_shellsortorder_desc(data, index, l, r);
+  }
+}
+// Introspective descending quick-ordering: permutes only index[] (data
+// unchanged).  NOTE(review): the pivot position is chosen with
+// ram_integer64_median3 over raw data positions, while the ascending twin
+// uses ram_integer64_median3index through index[]; the partition still uses
+// data[index[r]] as pivot so the result is a valid ordering, but the
+// "median" is taken over the wrong elements -- verify whether
+// median3index was intended here.
+void ram_integer64_quickorder_desc_intro(ValueT *data, IndexT *index, IndexT l, IndexT r, int restlevel)
+{
+  IndexT m;
+  if (restlevel>0){
+    if (INSERTIONSORT_LIMIT_QUICK < r-l){
+    IndexT ti;
+      m=(l+r)/2;
+      m = ram_integer64_median3(data, l+randIndex((r-l)/2), m, r-randIndex((r-l)/2));
+    EXCH(index[m], index[r], ti)
+      m = ram_integer64_quickorderpart_desc_no_sentinels(data, index, l, r);
+    restlevel--;
+    ram_integer64_quickorder_desc_intro(data, index, l, m-1, restlevel);
+    ram_integer64_quickorder_desc_intro(data, index, m+1, r, restlevel);
+    }
+    else  ram_integer64_insertionorder_desc(data, index, l, r); 
+  }else{
+  ram_integer64_shellorder_desc(data, index, l, r);
+  }
+}
+
+
+// LSB (least-significant-digit-first) radix sort of n unsigned-coerced
+// values in radixbits-wide digits, ping-ponging between data and auxdata.
+// The most significant digit is XORed with signmask so that two's-complement
+// values order correctly as unsigned keys.  Digits whose n keys all fall
+// into a single bucket are skipped entirely (noskip-flag).
+void ram_integer64_radixsort(
+  UValueT * data        // RETURNED: pointer to data vector coerced to unsigned
+, UValueT * auxdata     // MODIFIED: pointer to auxiliary data vector coerced to unsigned
+, IndexT * stats        // MODIFIED: pointer to counting vector with nradixes*(pow(2, radixbits)+1) elements
+, IndexT ** pstats      // MODIFIED: pointer to vector of pointers with nradixes elements
+, IndexT n              // number of elements in data and auxdata
+, int nradixes          // number of radixes where nradixes*radixbits==total number of bits
+, int radixbits         // number of bits in radix where nradixes*radixbits==total number of bits
+, Rboolean decreasing   // one of {0=ascending, 1=descending}
+)
+{
+  IndexT w,b,b2,i;
+  // NOTE(review): pow() returns double; 1<<radixbits would compute this
+  // power of two without a float-to-int conversion
+  int nbuckets = pow(2, radixbits);
+  int nbuckets1 = nbuckets - 1;
+  UValueT bitmask, signmask, tmppatt;
+  int nradixes1 = nradixes-1;
+  int wradixbits;
+  // Rprintf("nradixes=%d radixbits=%d nbuckets=%d\n", nradixes, radixbits, nbuckets); R_FlushConsole();
+  
+  // initialize bitmasks: bitmask has the low radixbits bits set,
+  // signmask only the top bit of a digit
+  bitmask = 1;
+  for (b=1;b<radixbits;b++)
+    bitmask = bitmask<<1 | 1;
+  signmask = bitmask ^ (bitmask >> 1);
+  
+  // initialize pstats pointer: one counter row of nbuckets+1 slots per digit
+  for (w=0;w<nradixes;w++)
+    pstats[w] = stats + w * (nbuckets+1);
+  // initialize stats
+  for (w=0;w<nradixes;w++){
+    stats = pstats[w];
+    for (i=0; i<nbuckets; i++)
+    stats[i] = 0;
+    stats[nbuckets] = 1; // radix-noskip-flag
+  }
+  // count all buckets of all digits in one pass over the data;
+  // the last (most significant) digit is sign-flipped via signmask
+  for (i=0; i<n; i++){
+    tmppatt = data[i];
+    pstats[0][tmppatt & bitmask]++;
+    for (w=1;w<nradixes1;w++)
+      pstats[w][(tmppatt >>= radixbits) & bitmask]++;
+    pstats[nradixes1][ (((tmppatt >> radixbits) & bitmask) ^ signmask) ]++;
+  }
+  // cumulate counts into starting offsets (reversed bucket order when
+  // decreasing) and clear the noskip-flag for any digit where all n keys
+  // share one bucket -- that pass can then be skipped
+  if (decreasing){
+    for (w=0;w<nradixes;w++){
+      stats = pstats[w];
+      b = stats[nbuckets1];
+      if (b==n)
+      stats[nbuckets] = 0; // radix-noskip-flag
+      stats[nbuckets1] = 0;
+      for (i=nbuckets1-1; i>=0; i--){
+      b2 = stats[i];
+      if (b2==n)
+        stats[nbuckets] = 0; // radix-noskip-flag
+      stats[i] = b;
+      b += b2;
+      }
+    }
+  }else{
+    for (w=0;w<nradixes;w++){
+      stats = pstats[w];
+      b = stats[0];
+      if (b==n)
+      stats[nbuckets] = 0; // radix-noskip-flag
+      stats[0] = 0;
+      for (i=1; i<nbuckets; i++){
+      b2 = stats[i];
+      if (b2==n)
+        stats[nbuckets] = 0; // radix-noskip-flag
+      stats[i] = b;
+      b += b2;
+      }
+    }
+  }
+  // move the data: each executed pass scatters from the current source to
+  // the current target; b counts executed passes, so b's parity tells which
+  // of data/auxdata currently holds the keys
+  for (b=0,w=0;w<nradixes;w++){
+    stats=pstats[w];
+    // Rprintf("w=%d need=%d\n", w, stats[nbuckets]); R_FlushConsole();
+    if (stats[nbuckets]){ // radix-noskip-flag
+      wradixbits = w*radixbits;
+      if (b%2){
+          if (w==0){
+          for (i=0; i<n; i++){
+          MOVE(data[stats[ auxdata[i] & bitmask]++ ], auxdata[i])
+          }
+        }else if (w<nradixes1){
+          for (i=0; i<n; i++){
+          MOVE(data[stats[ auxdata[i]>>wradixbits & bitmask ]++], auxdata[i])
+          }
+        }else{
+          for (i=0; i<n; i++){
+          MOVE(data[stats[ (((auxdata[i] >> wradixbits) & bitmask) ^ signmask) ]++], auxdata[i])
+          }
+        }
+      }else{
+          if (w==0){
+          for (i=0; i<n; i++){
+          MOVE(auxdata[stats[ data[i] & bitmask ]++], data[i])
+          }
+        }else if (w<nradixes1){
+          for (i=0; i<n; i++){
+          MOVE(auxdata[stats[ data[i]>>wradixbits & bitmask ]++], data[i])
+          }
+        }else{
+          for (i=0; i<n; i++){
+          MOVE(auxdata[stats[ (((data[i] >> wradixbits) & bitmask) ^ signmask) ]++], data[i])
+          }
+        }
+      }
+    b++;
+    }
+  }
+  // copy back in case of odd number of copies
+    if (b%2){
+    for (i=0; i<n; i++)
+    MOVE(data[i], auxdata[i])
+    b++;  
+  }
+  return;
+}
+
+// LSB radix sort of n unsigned-coerced values that carries an index vector
+// along with the data (sortordering variant of ram_integer64_radixsort):
+// index/auxindex ping-pong in lockstep with data/auxdata.  The most
+// significant digit is XORed with signmask for correct signed ordering.
+void ram_integer64_radixsortorder(
+  UValueT * data          // RETURNED: pointer to data vector coerced to unsigned
+, UValueT * auxdata       // MODIFIED: pointer to auxiliary data vector coerced to unsigned
+, IndexT * index          // RETURNED: pointer to index vector
+, IndexT * auxindex       // MODIFIED: pointer to auxiliary index vector
+, IndexT * stats          // MODIFIED: pointer to counting vector with nradixes*(pow(2, radixbits)+1) elements
+, IndexT ** pstats        // MODIFIED: pointer to vector of pointers with nradixes elements
+, IndexT n                // number of elements in data and auxdata
+, int nradixes            // number of radixes where nradixes*radixbits==total number of bits
+, int radixbits           // number of bits in radix where nradixes*radixbits==total number of bits
+, Rboolean decreasing     // one of {0=ascending, 1=descending}
+)
+{
+  IndexT w,b,b2,i;
+  // NOTE(review): pow() returns double; 1<<radixbits would compute this
+  // power of two without a float-to-int conversion
+  int nbuckets = pow(2, radixbits);
+  int nbuckets1 = nbuckets - 1;
+  UValueT bitmask, signmask, tmppatt;
+  int nradixes1 = nradixes-1;
+  int wradixbits;
+  //Rprintf("nradixes=%d radixbits=%d nbuckets=%d\n", nradixes, radixbits, nbuckets); R_FlushConsole();
+  
+  
+  // initialize bitmasks: bitmask has the low radixbits bits set,
+  // signmask only the top bit of a digit
+  bitmask = 1;
+  for (b=1;b<radixbits;b++)
+    bitmask = bitmask<<1 | 1;
+  signmask = bitmask ^ (bitmask >> 1);
+  
+  // initialize pstats pointer: one counter row of nbuckets+1 slots per digit
+  for (w=0;w<nradixes;w++)
+    pstats[w] = stats + w * (nbuckets+1);
+  // initialize stats
+  for (w=0;w<nradixes;w++){
+    stats = pstats[w];
+    for (i=0; i<nbuckets; i++)
+    stats[i] = 0;
+    stats[nbuckets] = 1; // radix-noskip-flag
+  }
+  // count all buckets of all digits in one pass over the data
+  for (i=0; i<n; i++){
+    tmppatt = data[i];
+    pstats[0][tmppatt & bitmask]++;
+    for (w=1;w<nradixes1;w++)
+      pstats[w][(tmppatt >>= radixbits) & bitmask]++;
+    pstats[nradixes1][ (((tmppatt >> radixbits) & bitmask) ^ signmask) ]++;
+  }
+  // cumulate counts into starting offsets (reversed bucket order when
+  // decreasing) and clear the noskip-flag for digits that can be skipped
+  if (decreasing){
+    for (w=0;w<nradixes;w++){
+      stats = pstats[w];
+      b = stats[nbuckets1];
+      if (b==n)
+      stats[nbuckets] = 0; // radix-noskip-flag
+      stats[nbuckets1] = 0;
+      for (i=nbuckets1-1; i>=0; i--){
+      b2 = stats[i];
+      if (b2==n)
+        stats[nbuckets] = 0; // radix-noskip-flag
+      stats[i] = b;
+      b += b2;
+      }
+    }
+  }else{
+    for (w=0;w<nradixes;w++){
+      stats = pstats[w];
+      b = stats[0];
+      if (b==n)
+      stats[nbuckets] = 0; // radix-noskip-flag
+      stats[0] = 0;
+      for (i=1; i<nbuckets; i++){
+      b2 = stats[i];
+      if (b2==n)
+        stats[nbuckets] = 0; // radix-noskip-flag
+      stats[i] = b;
+      b += b2;
+      }
+    }
+  }
+  // move the data and index together; b's parity tracks which of the
+  // data/auxdata (and index/auxindex) pairs currently holds the keys
+  for (b=0,w=0;w<nradixes;w++){
+    stats=pstats[w];
+    if (stats[nbuckets]){ // radix-noskip-flag
+      wradixbits = w*radixbits;
+      if (b%2){
+          if (w==0){
+          for (i=0; i<n; i++){
+            b2 = stats[ auxdata[i] & bitmask ]++;
+          MOVE(index[b2], auxindex[i])
+          MOVE(data[b2], auxdata[i])
+          }
+        }else if (w<nradixes1){
+          for (i=0; i<n; i++){
+            b2 = stats[ auxdata[i]>>wradixbits & bitmask ]++;
+          MOVE(index[b2], auxindex[i])
+          MOVE(data[b2], auxdata[i])
+          }
+        }else{
+          for (i=0; i<n; i++){
+            b2 = stats[ (((auxdata[i] >> wradixbits) & bitmask) ^ signmask) ]++;
+          MOVE(index[b2], auxindex[i])
+          MOVE(data[b2], auxdata[i])
+          }
+        }
+      }else{
+          if (w==0){
+          for (i=0; i<n; i++){
+            b2 = stats[ data[i] & bitmask ]++;
+          MOVE(auxindex[b2], index[i])
+          MOVE(auxdata[b2], data[i])
+          }
+        }else if (w<nradixes1){
+          for (i=0; i<n; i++){
+            b2 = stats[ data[i]>>wradixbits & bitmask ]++;
+          MOVE(auxindex[b2], index[i])
+          MOVE(auxdata[b2], data[i])
+          }
+        }else{
+          for (i=0; i<n; i++){
+            b2 = stats[ (((data[i] >> wradixbits) & bitmask) ^ signmask) ]++;
+          MOVE(auxindex[b2], index[i])
+          MOVE(auxdata[b2], data[i])
+          }
+        }
+      }
+    b++;
+    }
+  }
+  // copy back in case of odd number of copies
+    if (b%2){
+    for (i=0; i<n; i++){
+    MOVE(index[i], auxindex[i])
+    MOVE(data[i], auxdata[i])
+    }
+    b++;  
+  }
+  return;
+}
+
+// LSB radix ordering: reorders index[] so that data[index[]] is sorted,
+// leaving data[] itself untouched.  The 64-bit keys are processed in
+// nradixes counting passes of radixbits bits each, least significant radix
+// first, ping-ponging the permutation between index[] and auxindex[].
+// The most significant radix is XORed with its sign bit so two's-complement
+// keys order correctly as unsigned bucket numbers.  Radixes where all n
+// keys fall into a single bucket are skipped entirely.
+void ram_integer64_radixorder(
+  UValueT * data          // UNCHANGED: pointer to data vector
+, IndexT * index          // RETURNED: pointer to index vector
+, IndexT * auxindex       // MODIFIED: pointer to auxiliary index vector
+, IndexT * stats          // MODIFIED: pointer to counting vector with nradixes*(pow(2, radixbits)+1) elements
+, IndexT ** pstats        // MODIFIED: pointer to vector of pointers with nradixes elements
+, IndexT n                // number of elements in data and auxdata
+, int nradixes            // number of radixes where nradixes*radixbits==total number of bits
+, int radixbits           // number of bits in radix where nradixes*radixbits==total number of bits
+, Rboolean decreasing     // one of {0=ascending, 1=descending}
+)
+{
+  IndexT w,b,b2,i;
+  // exact integer power of two instead of float pow(): no rounding on the
+  // double->int conversion (assumes radixbits < 31 -- TODO confirm callers)
+  int nbuckets = 1 << radixbits;
+  int nbuckets1 = nbuckets - 1;
+  UValueT bitmask, signmask, tmppatt;
+  int nradixes1 = nradixes-1;
+  int wradixbits;
+
+  // initialize bitmasks: bitmask selects one radix, signmask is its top bit
+  bitmask = 1;
+  for (b=1;b<radixbits;b++)
+    bitmask = bitmask<<1 | 1;
+  signmask = bitmask ^ (bitmask >> 1);
+
+  // initialize pstats pointers: one counting block of nbuckets+1 slots per
+  // radix (slot nbuckets holds the radix-noskip-flag)
+  for (w=0;w<nradixes;w++)
+    pstats[w] = stats + w * (nbuckets+1);
+  // initialize stats
+  for (w=0;w<nradixes;w++){
+    stats = pstats[w];
+    for (i=0; i<nbuckets; i++)
+      stats[i] = 0;
+    stats[nbuckets] = 1; // radix-noskip-flag
+  }
+  // count all buckets of all radixes in a single scan over the data
+  for (i=0; i<n; i++){
+    tmppatt = data[i];  // xx here we save the indirection through index
+    pstats[0][tmppatt & bitmask]++;
+    for (w=1;w<nradixes1;w++)
+      pstats[w][(tmppatt >>= radixbits) & bitmask]++;
+    pstats[nradixes1][ (((tmppatt >> radixbits) & bitmask) ^ signmask) ]++;
+  }
+  // cumulate counts into bucket start positions and clear the
+  // radix-noskip-flag wherever one bucket holds all n keys
+  if (decreasing){
+    for (w=0;w<nradixes;w++){
+      stats = pstats[w];
+      b = stats[nbuckets1];
+      if (b==n)
+        stats[nbuckets] = 0; // radix-noskip-flag
+      stats[nbuckets1] = 0;
+      for (i=nbuckets1-1; i>=0; i--){
+        b2 = stats[i];
+        if (b2==n)
+          stats[nbuckets] = 0; // radix-noskip-flag
+        stats[i] = b;
+        b += b2;
+      }
+    }
+  }else{
+    for (w=0;w<nradixes;w++){
+      stats = pstats[w];
+      b = stats[0];
+      if (b==n)
+        stats[nbuckets] = 0; // radix-noskip-flag
+      stats[0] = 0;
+      for (i=1; i<nbuckets; i++){
+        b2 = stats[i];
+        if (b2==n)
+          stats[nbuckets] = 0; // radix-noskip-flag
+        stats[i] = b;
+        b += b2;
+      }
+    }
+  }
+  // move the index: b counts executed passes, its parity tells which of
+  // index/auxindex currently holds the permutation
+  for (b=0,w=0;w<nradixes;w++){
+    stats=pstats[w];
+    if (stats[nbuckets]){ // radix-noskip-flag
+      wradixbits = w*radixbits;
+      if (b%2){
+        if (w==0){
+          for (i=0; i<n; i++){
+            b2 = stats[ data[auxindex[i]] & bitmask ]++;
+            MOVE(index[b2], auxindex[i])
+          }
+        }else if (w<nradixes1){
+          for (i=0; i<n; i++){
+            b2 = stats[ data[auxindex[i]]>>wradixbits & bitmask ]++;
+            MOVE(index[b2], auxindex[i])
+          }
+        }else{
+          for (i=0; i<n; i++){
+            b2 = stats[ (((data[auxindex[i]] >> wradixbits) & bitmask) ^ signmask) ]++;
+            MOVE(index[b2], auxindex[i])
+          }
+        }
+      }else{
+        if (w==0){
+          for (i=0; i<n; i++){
+            b2 = stats[ data[index[i]] & bitmask ]++;
+            MOVE(auxindex[b2], index[i])
+          }
+        }else if (w<nradixes1){
+          for (i=0; i<n; i++){
+            b2 = stats[ data[index[i]]>>wradixbits & bitmask ]++;
+            MOVE(auxindex[b2], index[i])
+          }
+        }else{
+          for (i=0; i<n; i++){
+            b2 = stats[ (((data[index[i]] >> wradixbits) & bitmask) ^ signmask) ]++;
+            MOVE(auxindex[b2], index[i])
+          }
+        }
+      }
+      b++;
+    }
+  }
+  // copy back in case of odd number of copies
+  if (b%2){
+    for (i=0; i<n; i++){
+      MOVE(index[i], auxindex[i])
+    }
+    b++;
+  }
+  return;
+}
+
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                           LOCAL FUNCTIONS                               **/
+/**                                                                         **/
+/*****************************************************************************/
+
+// static
+
+// returns uniform random index in range 0..(n-1)
+static IndexT randIndex(
+  IndexT n    // number of positions to random select from
+){
+  IndexT candidate;
+  // CRAN disallows rand(); unif_rand() requires bracketing the RNG state,
+  // which makes this roughly 3x slower than a plain rand()-based draw.
+  GetRNGstate();
+  // rejection loop: unif_rand() may (rarely) scale to exactly n
+  do {
+    candidate = (IndexT)(unif_rand() * n);
+  } while (candidate >= n);
+  PutRNGstate();
+  return candidate;
+}
+
+// returns one of {a,b,c} such that it represents the median of data[{a,b,c}]
+static IndexT ram_integer64_median3(
+ValueT *data  // pointer to data
+, IndexT a    // pos in data
+, IndexT b    // pos in data
+, IndexT c    // pos in data
+)
+{
+  if (LESS(data[a], data[b])) {
+    // a < b: b is the median iff b < c, else the larger of a,c
+    if (LESS(data[b], data[c]))
+      return b;
+    return LESS(data[a], data[c]) ? c : a;
+  }
+  // b <= a: b is the median iff c < b, else the smaller of a,c
+  if (LESS(data[c], data[b]))
+    return b;
+  return LESS(data[c], data[a]) ? c : a;
+}
+
+// returns one of {a,b,c} such that it represents the median of data[index[{a,b,c}]]
+static IndexT ram_integer64_median3index(
+  ValueT *data    // pointer to data
+, IndexT *index   // index positions into data
+, IndexT a        // pos in index
+, IndexT b        // pos in index
+, IndexT c        // pos in index
+)
+{
+  if (KEYLESS(index[a], index[b])) {
+    // key(a) < key(b): b is the median iff key(b) < key(c)
+    if (KEYLESS(index[b], index[c]))
+      return b;
+    return KEYLESS(index[a], index[c]) ? c : a;
+  }
+  // key(b) <= key(a): b is the median iff key(c) < key(b)
+  if (KEYLESS(index[c], index[b]))
+    return b;
+  return KEYLESS(index[c], index[a]) ? c : a;
+}
+
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                            R/C INTERFACE                                **/
+/**                                                                         **/
+/*****************************************************************************/
+
+SEXP r_ram_integer64_shellsort(
+  SEXP x_            /* data vector */
+, SEXP has_na_       /* logical scalar */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+)
+{
+  // .Call interface: shellsort x_ in place, then reposition NA/NaN;
+  // returns an INTSXP scalar (DEBUG_RETURN).
+  SEXP ret_;
+  int ret;
+  PROTECT( ret_ = allocVector(INTSXP, 1) );
+
+  int n = LENGTH(x_);
+  Rboolean has_na     = asLogical(has_na_);
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+
+  R_Busy(1);
+  DEBUG_INIT
+  // reinterpret the REAL payload as signed 64-bit integers
+  ValueT *data = (ValueT *) REAL(x_);
+
+  if (decreasing)
+    ram_integer64_shellsort_desc(data, 0, n-1);
+  else
+    ram_integer64_shellsort_asc(data, 0, n-1);
+
+  ret = ram_integer64_fixsortNA(data, n
+  , has_na     // 0 for pure doubles, 1 if NA or NaN can be present
+  , na_last    // 0 for NA NaN left, 1 for NA NaN right
+  , decreasing // 0 for ascending, 1 for descending
+  );
+
+  INTEGER(ret_)[0] = DEBUG_RETURN;
+  R_Busy(0);
+  UNPROTECT(1);
+  return ret_;
+}
+
+SEXP r_ram_integer64_shellsortorder(
+  SEXP x_            /* data vector */
+, SEXP index_        /* index vector */
+, SEXP has_na_       /* logical scalar */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+)
+{
+  // .Call interface: shellsort x_ in place while permuting index_ alongside,
+  // then reposition NA/NaN; returns an INTSXP scalar (DEBUG_RETURN).
+  SEXP ret_;
+  int ret;
+  PROTECT( ret_ = allocVector(INTSXP, 1) );
+
+  int n = LENGTH(x_);
+  Rboolean has_na     = asLogical(has_na_);
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+
+  R_Busy(1);
+  DEBUG_INIT
+  ValueT *data  = (ValueT *) REAL(x_);   // int64 view of x_
+  IndexT *index = INTEGER(index_);
+
+  if (decreasing)
+    ram_integer64_shellsortorder_desc(data, index, 0, n-1);
+  else
+    ram_integer64_shellsortorder_asc(data, index, 0, n-1);
+
+  ret = ram_integer64_fixsortorderNA(data, index, n
+  , has_na     // 0 for pure doubles, 1 if NA or NaN can be present
+  , na_last    // 0 for NA NaN left, 1 for NA NaN right
+  , decreasing // 0 for ascending, 1 for descending
+  , 0  // no auxindex
+  );
+
+  INTEGER(ret_)[0] = DEBUG_RETURN;
+  R_Busy(0);
+  UNPROTECT(1);
+  return ret_;
+}
+
+SEXP r_ram_integer64_shellorder(
+  SEXP x_            /* data vector */
+, SEXP index_        /* index vector */
+, SEXP has_na_       /* logical scalar */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+)
+{
+  // .Call interface: shellorder -- permutes index_ so data[index] is
+  // sorted while x_ stays untouched; returns an INTSXP scalar.
+  SEXP ret_;
+  int ret;
+  PROTECT( ret_ = allocVector(INTSXP, 1) );
+
+  int i, n = LENGTH(x_);
+  Rboolean has_na     = asLogical(has_na_);
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+
+  R_Busy(1);
+  DEBUG_INIT
+  ValueT *data  = (ValueT *) REAL(x_);   // int64 view of x_
+  IndexT *index = INTEGER(index_);
+
+  // shift the R 1-based index to 0-based for the C routines
+  for (i=0; i<n; i++)
+    index[i]--;
+
+  if (decreasing)
+    ram_integer64_shellorder_desc(data, index, 0, n-1);
+  else
+    ram_integer64_shellorder_asc(data, index, 0, n-1);
+
+  ret = ram_integer64_fixorderNA(data, index, n
+  , has_na     // 0 for pure doubles, 1 if NA or NaN can be present
+  , na_last    // 0 for NA NaN left, 1 for NA NaN right
+  , decreasing // 0 for ascending, 1 for descending
+  , 0  // no auxindex
+  );
+
+  // restore 1-based indexing for R
+  for (i=0; i<n; i++)
+    index[i]++;
+
+  INTEGER(ret_)[0] = DEBUG_RETURN;
+  R_Busy(0);
+  UNPROTECT(1);
+  return ret_;
+}
+
+
+
+SEXP r_ram_integer64_mergesort(
+  SEXP x_            /* data vector */
+, SEXP has_na_       /* logical scalar */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+)
+{
+  // .Call interface: no-copy mergesort of x_ in place (ping-ponging with an
+  // auxiliary buffer), then reposition NA/NaN; returns an INTSXP scalar.
+  SEXP ret_;
+  int ret;
+  PROTECT( ret_ = allocVector(INTSXP, 1) );
+
+  int i, n = LENGTH(x_);
+  Rboolean has_na     = asLogical(has_na_);
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+
+  R_Busy(1);
+  DEBUG_INIT
+  ValueT *data    = (ValueT *) REAL(x_);               // int64 view of x_
+  ValueT *auxdata = (ValueT *) R_alloc(n, sizeof(ValueT));
+
+  // seed the auxiliary buffer with a copy of the data
+  for (i=0; i<n; i++){
+    MOVE(auxdata[i], data[i])
+  }
+
+  if (decreasing)
+    ram_integer64_mergesort_desc_rec(data, auxdata, 0, n-1);
+  else
+    ram_integer64_mergesort_asc_rec(data, auxdata, 0, n-1);
+
+  ret = ram_integer64_fixsortNA(data, n
+  , has_na     // 0 for pure doubles, 1 if NA or NaN can be present
+  , na_last    // 0 for NA NaN left, 1 for NA NaN right
+  , decreasing // 0 for ascending, 1 for descending
+  );
+
+  INTEGER(ret_)[0] = DEBUG_RETURN;
+  R_Busy(0);
+  UNPROTECT(1);
+  return ret_;
+}
+
+SEXP r_ram_integer64_mergesortorder(
+  SEXP x_            /* data vector */
+, SEXP index_            /* index vector */
+, SEXP has_na_       /* logical scalar */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+)
+{
+  // .Call interface: no-copy mergesort of x_ with index_ permuted alongside,
+  // then reposition NA/NaN; returns an INTSXP scalar (DEBUG_RETURN).
+  SEXP ret_;
+  int ret;
+  PROTECT( ret_ = allocVector(INTSXP, 1) );
+
+  int i, n = LENGTH(x_);
+  Rboolean has_na     = asLogical(has_na_);
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+
+  R_Busy(1);
+  DEBUG_INIT
+
+  IndexT *index    = INTEGER(index_);
+  IndexT *auxindex = (IndexT *) R_alloc(n, sizeof(IndexT));
+  ValueT *data     = (ValueT *) REAL(x_);   // int64 view of x_
+  ValueT *auxdata  = (ValueT *) R_alloc(n, sizeof(ValueT));
+
+  // seed both auxiliary buffers with copies
+  for (i=0; i<n; i++){
+    MOVE(auxindex[i], index[i])
+    MOVE(auxdata[i], data[i])
+  }
+
+  if (decreasing)
+    ram_integer64_mergesortorder_desc_rec(data, auxdata, index, auxindex, 0, n-1);
+  else
+    ram_integer64_mergesortorder_asc_rec(data, auxdata, index, auxindex, 0, n-1);
+
+  ret = ram_integer64_fixsortorderNA(data, index, n
+  , has_na     // 0 for pure doubles, 1 if NA or NaN can be present
+  , na_last    // 0 for NA NaN left, 1 for NA NaN right
+  , decreasing // 0 for ascending, 1 for descending
+  , auxindex
+  );
+
+  INTEGER(ret_)[0] = DEBUG_RETURN;
+  R_Busy(0);
+  UNPROTECT(1);
+  return ret_;
+}
+
+SEXP r_ram_integer64_mergeorder(
+  SEXP x_            /* data vector */
+, SEXP index_        /* index vector */
+, SEXP has_na_       /* logical scalar */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+)
+{
+  // .Call interface: no-copy mergeorder -- permutes index_ so data[index]
+  // is sorted while x_ stays untouched; returns an INTSXP scalar.
+  SEXP ret_;
+  int ret;
+  PROTECT( ret_ = allocVector(INTSXP, 1) );
+
+  int i, n = LENGTH(x_);
+  Rboolean has_na     = asLogical(has_na_);
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+
+  R_Busy(1);
+  DEBUG_INIT
+
+  ValueT *data     = (ValueT *) REAL(x_);   // int64 view of x_
+  IndexT *index    = INTEGER(index_);
+  IndexT *auxindex = (IndexT *) R_alloc(n, sizeof(IndexT));
+
+  // shift the R 1-based index to 0-based for the C routines
+  for (i=0; i<n; i++)
+    index[i]--;
+
+  // seed the auxiliary index buffer with a copy
+  for (i=0; i<n; i++){
+    MOVE(auxindex[i], index[i])
+  }
+
+  if (decreasing)
+    ram_integer64_mergeorder_desc_rec(data, index, auxindex, 0, n-1);
+  else
+    ram_integer64_mergeorder_asc_rec(data, index, auxindex, 0, n-1);
+
+  ret = ram_integer64_fixorderNA(data, index, n
+  , has_na     // 0 for pure doubles, 1 if NA or NaN can be present
+  , na_last    // 0 for NA NaN left, 1 for NA NaN right
+  , decreasing // 0 for ascending, 1 for descending
+  , auxindex
+  );
+
+  // restore 1-based indexing for R
+  for (i=0; i<n; i++)
+    index[i]++;
+
+  INTEGER(ret_)[0] = DEBUG_RETURN;
+  R_Busy(0);
+  UNPROTECT(1);
+  return ret_;
+}
+
+SEXP r_ram_integer64_quicksort(
+  SEXP x_            /* data vector */
+, SEXP has_na_       /* logical scalar */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+, SEXP restlevel_    /* logical scalar */
+)
+{
+  // .Call interface: introspective quicksort of x_ in place; restlevel_
+  // bounds the recursion before the intro fallback kicks in; then
+  // reposition NA/NaN.  Returns an INTSXP scalar (DEBUG_RETURN).
+  SEXP ret_;
+  int ret;
+  PROTECT( ret_ = allocVector(INTSXP, 1) );
+
+  int n = LENGTH(x_);
+  Rboolean has_na     = asLogical(has_na_);
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+  int restlevel = asInteger(restlevel_);
+
+  R_Busy(1);
+  DEBUG_INIT
+  ValueT *data = (ValueT *) REAL(x_);   // int64 view of x_
+
+  if (decreasing)
+    ram_integer64_quicksort_desc_intro(data, 0, n-1, restlevel);
+  else
+    ram_integer64_quicksort_asc_intro(data, 0, n-1, restlevel);
+
+  ret = ram_integer64_fixsortNA(data, n
+  , has_na     // 0 for pure doubles, 1 if NA or NaN can be present
+  , na_last    // 0 for NA NaN left, 1 for NA NaN right
+  , decreasing // 0 for ascending, 1 for descending
+  );
+
+  INTEGER(ret_)[0] = DEBUG_RETURN;
+  R_Busy(0);
+  UNPROTECT(1);
+  return ret_;
+}
+
+SEXP r_ram_integer64_quicksortorder(
+  SEXP x_            /* data vector */
+, SEXP index_        /* index vector */
+, SEXP has_na_       /* logical scalar */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+, SEXP restlevel_    /* logical scalar */
+)
+{
+  // .Call interface: introspective quicksort of x_ with index_ permuted
+  // alongside; restlevel_ bounds the recursion; then reposition NA/NaN.
+  SEXP ret_;
+  int ret;
+  PROTECT( ret_ = allocVector(INTSXP, 1) );
+
+  int n = LENGTH(x_);
+  Rboolean has_na     = asLogical(has_na_);
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+  int restlevel = asInteger(restlevel_);
+
+  R_Busy(1);
+  DEBUG_INIT
+  ValueT *data  = (ValueT *) REAL(x_);   // int64 view of x_
+  IndexT *index = INTEGER(index_);
+
+  if (decreasing)
+    ram_integer64_quicksortorder_desc_intro(data, index, 0, n-1, restlevel);
+  else
+    ram_integer64_quicksortorder_asc_intro(data, index, 0, n-1, restlevel);
+
+  ret = ram_integer64_fixsortorderNA(data, index, n
+  , has_na     // 0 for pure doubles, 1 if NA or NaN can be present
+  , na_last    // 0 for NA NaN left, 1 for NA NaN right
+  , decreasing // 0 for ascending, 1 for descending
+  , 0  // no auxindex
+  );
+
+  INTEGER(ret_)[0] = DEBUG_RETURN;
+  R_Busy(0);
+  UNPROTECT(1);
+  return ret_;
+}
+
+SEXP r_ram_integer64_quickorder(
+  SEXP x_            /* data vector */
+, SEXP index_        /* index vector */
+, SEXP has_na_       /* logical scalar */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+, SEXP restlevel_    /* logical scalar */
+)
+{
+  // .Call interface: introspective quickorder -- permutes index_ so
+  // data[index] is sorted while x_ stays untouched; restlevel_ bounds the
+  // recursion; then reposition NA/NaN.
+  SEXP ret_;
+  int ret;
+  PROTECT( ret_ = allocVector(INTSXP, 1) );
+
+  int i, n = LENGTH(x_);
+  Rboolean has_na     = asLogical(has_na_);
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+  int restlevel = asInteger(restlevel_);
+
+  R_Busy(1);
+  DEBUG_INIT
+  ValueT *data  = (ValueT *) REAL(x_);   // int64 view of x_
+  IndexT *index = INTEGER(index_);
+
+  // shift the R 1-based index to 0-based for the C routines
+  for (i=0; i<n; i++)
+    index[i]--;
+
+  if (decreasing)
+    ram_integer64_quickorder_desc_intro(data, index, 0, n-1, restlevel);
+  else
+    ram_integer64_quickorder_asc_intro(data, index, 0, n-1, restlevel);
+
+  ret = ram_integer64_fixorderNA(data, index, n
+  , has_na     // 0 for pure doubles, 1 if NA or NaN can be present
+  , na_last    // 0 for NA NaN left, 1 for NA NaN right
+  , decreasing // 0 for ascending, 1 for descending
+  , 0  // no auxindex
+  );
+
+  // restore 1-based indexing for R
+  for (i=0; i<n; i++)
+    index[i]++;
+
+  INTEGER(ret_)[0] = DEBUG_RETURN;
+  R_Busy(0);
+  UNPROTECT(1);
+  return ret_;
+}
+
+SEXP r_ram_integer64_radixsort(
+  SEXP x_            /* data vector */
+, SEXP has_na_       /* logical scalar */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+, SEXP radixbits_
+)
+{
+  // .Call interface: LSB radixsort of the integer64 vector x_ in place,
+  // then reposition NA/NaN; returns an INTSXP scalar (DEBUG_RETURN).
+  SEXP ret_;
+  PROTECT( ret_ = allocVector(INTSXP, 1) );
+  int ret;
+
+  R_Busy(1);
+  DEBUG_INIT
+
+  IndexT n = LENGTH(x_);
+  Rboolean has_na     = asLogical(has_na_);
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+  int radixbits = asInteger(radixbits_);
+  int nradixes = 64 / radixbits;  // radixbits is expected to divide 64
+  // counting slots per radix: 2^radixbits buckets + 1 skip-flag, computed
+  // with an exact integer shift instead of the floating-point pow()
+  size_t nstats = ((size_t)1 << radixbits) + 1;
+
+  ValueT *data;
+  data = (ValueT *) REAL(x_);  // int64 view of x_
+  ValueT *auxdata;
+  auxdata = (ValueT *) R_alloc(n, sizeof(ValueT));
+
+  IndexT *stats;
+  stats = (IndexT *) R_alloc(nradixes*nstats, sizeof(IndexT));
+  IndexT **pstats;
+  pstats = (IndexT **) R_alloc(nradixes, sizeof(IndexT*));
+
+  ram_integer64_radixsort(
+    (UValueT *) data
+  , (UValueT *) auxdata
+  , stats
+  , pstats
+  , n
+  , nradixes
+  , radixbits
+  , decreasing
+  );
+  ret = ram_integer64_fixsortNA(data, n
+  , has_na     // 0 for pure doubles, 1 if NA or NaN can be present
+  , na_last    // 0 for NA NaN left, 1 for NA NaN right
+  , decreasing // 0 for ascending, 1 for descending
+  );
+
+  INTEGER(ret_)[0] = DEBUG_RETURN;
+  R_Busy(0);
+  UNPROTECT(1);
+  return ret_;
+}
+SEXP r_ram_integer64_radixsortorder(
+  SEXP x_            /* data vector */
+, SEXP index_            /* index vector */
+, SEXP has_na_       /* logical scalar */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+, SEXP radixbits_
+)
+{
+  // .Call interface: LSB radixsort of x_ with index_ permuted alongside,
+  // then reposition NA/NaN; returns an INTSXP scalar (DEBUG_RETURN).
+  SEXP ret_;
+  PROTECT( ret_ = allocVector(INTSXP, 1) );
+  int ret;
+  R_Busy(1);
+  DEBUG_INIT
+  IndexT n = LENGTH(x_);
+  Rboolean has_na     = asLogical(has_na_);
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+  int radixbits = asInteger(radixbits_);
+  int nradixes = 64 / radixbits;  // radixbits is expected to divide 64
+  // counting slots per radix: 2^radixbits buckets + 1 skip-flag, computed
+  // with an exact integer shift instead of the floating-point pow()
+  size_t nstats = ((size_t)1 << radixbits) + 1;
+
+  IndexT *index = INTEGER(index_);
+  IndexT *auxindex;
+  auxindex = (IndexT *) R_alloc(n, sizeof(IndexT));
+  ValueT *data;
+  data = (ValueT *) REAL(x_);  // int64 view of x_
+  ValueT *auxdata;
+  auxdata = (ValueT *) R_alloc(n, sizeof(ValueT));
+
+  IndexT *stats;
+  stats = (IndexT *) R_alloc(nradixes*nstats, sizeof(IndexT));
+  IndexT **pstats;
+  pstats = (IndexT **) R_alloc(nradixes, sizeof(IndexT*));
+
+  ram_integer64_radixsortorder(
+    (UValueT *) data
+  , (UValueT *) auxdata
+  , index
+  , auxindex
+  , stats
+  , pstats
+  , n
+  , nradixes
+  , radixbits
+  , decreasing
+  );
+  ret = ram_integer64_fixsortorderNA(data, index, n
+  , has_na     // 0 for pure doubles, 1 if NA or NaN can be present
+  , na_last    // 0 for NA NaN left, 1 for NA NaN right
+  , decreasing // 0 for ascending, 1 for descending
+  , auxindex
+  );
+
+  INTEGER(ret_)[0] = DEBUG_RETURN;
+  R_Busy(0);
+  UNPROTECT(1);
+  return ret_;
+}
+
+SEXP r_ram_integer64_radixorder(
+  SEXP x_            /* data vector */
+, SEXP index_            /* index vector */
+, SEXP has_na_       /* logical scalar */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+, SEXP radixbits_
+)
+{
+  // .Call interface: LSB radixorder -- permutes index_ so data[index] is
+  // sorted while x_ stays untouched; then reposition NA/NaN.
+  SEXP ret_;
+  PROTECT( ret_ = allocVector(INTSXP, 1) );
+  int ret;
+  R_Busy(1);
+  DEBUG_INIT
+  IndexT i, n = LENGTH(x_);
+  Rboolean has_na     = asLogical(has_na_);
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+  int radixbits = asInteger(radixbits_);
+  int nradixes = 64 / radixbits;  // radixbits is expected to divide 64
+  // counting slots per radix: 2^radixbits buckets + 1 skip-flag, computed
+  // with an exact integer shift instead of the floating-point pow()
+  size_t nstats = ((size_t)1 << radixbits) + 1;
+
+  IndexT *index = INTEGER(index_);
+  IndexT *auxindex;
+  auxindex = (IndexT *) R_alloc(n, sizeof(IndexT));
+  ValueT *data;
+  data = (ValueT *) REAL(x_);  // int64 view of x_
+
+  IndexT *stats;
+  stats = (IndexT *) R_alloc(nradixes*nstats, sizeof(IndexT));
+  IndexT **pstats;
+  pstats = (IndexT **) R_alloc(nradixes, sizeof(IndexT*));
+
+  // shift the R 1-based index to 0-based for the C routine
+  for (i=0;i<n;i++)
+    index[i]--;
+
+  ram_integer64_radixorder(
+    (UValueT *) data
+  , index
+  , auxindex
+  , stats
+  , pstats
+  , n
+  , nradixes
+  , radixbits
+  , decreasing
+  );
+  ret = ram_integer64_fixorderNA(data, index, n
+  , has_na     // 0 for pure doubles, 1 if NA or NaN can be present
+  , na_last    // 0 for NA NaN left, 1 for NA NaN right
+  , decreasing // 0 for ascending, 1 for descending
+  , auxindex
+  );
+
+  // restore 1-based indexing for R
+  for (i=0;i<n;i++)
+    index[i]++;
+
+  INTEGER(ret_)[0] = DEBUG_RETURN;
+  R_Busy(0);
+  UNPROTECT(1);
+  return ret_;
+}
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                                EOF                                      **/
+/**                                                                         **/
+/*****************************************************************************/
diff --git a/src/sort64.h b/src/sort64.h
new file mode 100644
index 0000000..f136391
--- /dev/null
+++ b/src/sort64.h
@@ -0,0 +1,545 @@
+/*
+# C-Header for sorting and ordering
+# S3 atomic 64bit integers for R
+# (c) 2011, 2012 Jens Oehlschägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2012-10-03
+*/
+
+
+#ifndef _SORT64_INLCUDED
+#define _SORT64_INLCUDED
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                            MODULES USED                                 **/
+/**                                                                         **/
+/*****************************************************************************/
+
+#include <R.h>
+#include <Rdefines.h>
+//#include <Rinternals.h>
+//CRAN disallows rand: #include <stdlib.h> // rand
+
+#include "integer64.h"
+//#include "timing.h"
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                      DEFINITIONS AND MACROS                             **/
+/**                                                                         **/
+/*****************************************************************************/
+
+
+#define DEBUG_COUNTING 0
+#define DEBUG_INIT // compare_counter = 0; move_counter = 0; //initTicks();
+//#define DEBUG_RETURN getNewTicks()
+#define DEBUG_RETURN ret;
+// #define DEBUG_RETURN move_counter;
+#define DEBUG_DONE Rprintf("compare_counter=%d  move_counter=%d\n", compare_counter, move_counter); R_FlushConsole(); //doneTicks();
+
+#if defined(WIN32) || defined(WIN64) || defined(_WIN32_) || defined(_WIN64_) || defined(__WIN32__) || defined(__WIN64__) 
+  #define MULTI_THREADING 0
+#else
+  #define MULTI_THREADING 1
+#endif
+
+#if MULTI_THREADING
+#include <pthread.h>
+#endif
+
+// dummy for counting comp ops
+#define COUNTLESS 
+
+#define LESS(A,B) ((A)<(B))
+#define GREATER(A, B) LESS((B), (A))
+
+//#define MOVE(TO,FROM){move_counter++; TO=FROM;}
+#define MOVE(TO,FROM) TO=FROM; 
+#define EXCH(A,B,t) {MOVE(t,A) MOVE(A,B) MOVE(B,t)}
+#define COMPEXCH(A,B,t) if (LESS(B,A)) EXCH(A,B,t)
+
+#define KEY(A) (data[A])
+#define KEYLESS(A,B) (LESS(KEY(A),KEY(B)))
+#define KEYCOMPEXCH(A,B,t) if (KEYLESS(B,A)) EXCH(A,B,t)
+
+#define COMPEXCHi(A,B,t,Ai,Bi,ti) if (LESS(B,A)) {EXCH(A,B,t) EXCH(Ai,Bi,ti)}
+
+#define INSERTIONSORT_LIMIT_MERGE 16
+#define INSERTIONSORT_LIMIT_QUICK 16
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                      TYPEDEFS AND STRUCTURES                            **/
+/**                                                                         **/
+/*****************************************************************************/
+
+typedef int IndexT;
+typedef long long ValueT;
+typedef unsigned long long UValueT;
+
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                        EXPORTED VARIABLES                               **/
+/**                                                                         **/
+/*****************************************************************************/
+
+
+#ifndef _SORT64_C_SRC
+
+extern IndexT compare_counter;
+extern IndexT move_counter;
+
+#endif
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                        EXPORTED FUNCTIONS                               **/
+/**                                                                         **/
+/*****************************************************************************/
+
+void R_Busy (int which);
+
+// post sorting NA handling 
+int ram_integer64_fixsortNA(
+  ValueT *data     // RETURNED: pointer to data vector
+, IndexT n         // length of data vector
+, int has_na       // 0 for pure doubles, 1 if NA or NaN can be present
+, int na_last      // 0 for placing NA NaN left, 1 for placing NA NaN right
+, int decreasing   // 0 for ascending, 1 for descending (must match the same parameter in sorting)
+);
+
+// post sortordering NA handling 
+int ram_integer64_fixsortorderNA(
+  ValueT *data       // RETURNED: pointer to data vector
+, IndexT *index      // RETURNED: pointer to index vector
+, IndexT n           // length of vectors
+, int has_na         // 0 for pure doubles, 1 if NA or NaN can be present
+, int na_last        // 0 for placing NA NaN left, 1 for placing NA NaN right
+, int decreasing     // 0 for ascending, 1 for descending (must match the same parameter in sorting)
+, IndexT *auxindex   // MODIFIED: pointer to auxiliary index vector
+);
+
+// post ordering NA handling 
+int ram_integer64_fixorderNA(
+  ValueT *data          // UNCHANGED: pointer to data vector
+, IndexT *index         // RETURNED: pointer to index vector
+, IndexT n              // length of vectors
+, int has_na            // 0 for pure doubles, 1 if NA or NaN can be present
+, int na_last           // 0 for placing NA NaN left, 1 for placing NA NaN right
+, int decreasing        // 0 for ascending, 1 for descending (must match the same parameter in sorting)
+, IndexT *auxindex      // MODIFIED: pointer to auxiliary index vector
+);
+
+
+// ascending insertion sorting
+void ram_integer64_insertionsort_asc(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// ascending insertion sortordering
+void ram_integer64_insertionsortorder_asc(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// ascending insertion sortordering
+void ram_integer64_insertionorder_asc(
+  ValueT *data    // UNCHANGED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// descending insertion sorting
+void ram_integer64_insertionsort_desc(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// descending insertion sortordering
+void ram_integer64_insertionsortorder_desc(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// descending insertion sortordering
+void ram_integer64_insertionorder_desc(
+  ValueT *data    // UNCHANGED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+
+// ascending shell sorting
+void ram_integer64_shellsort_asc(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// ascending shell sortordering
+void ram_integer64_shellsortorder_asc(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// ascending shell sortordering
+void ram_integer64_shellorder_asc(
+  ValueT *data    // UNCHANGED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// descending shell sorting
+void ram_integer64_shellsort_desc(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// descending shell sortordering
+void ram_integer64_shellsortorder_desc(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// descending shell sortordering
+void ram_integer64_shellorder_desc(
+  ValueT *data    // UNCHANGED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+
+// ascending merge for sorting
+void ram_integer64_sortmerge_asc(
+  ValueT *c   // RETURNED: pointer to merge target data vector
+, ValueT *a   // UNCHANGED: pointer to merge source data vector a
+, ValueT *b   // UNCHANGED: pointer to merge source data vector b
+, IndexT na   // number of elements in merge source vector a
+, IndexT nb   // number of elements in merge source vector b
+);
+
+// ascending merge for ordering
+void ram_integer64_ordermerge_asc(
+ValueT *data  // UNCHANGED: pointer to data vector
+, IndexT *c   // RETURNED: pointer to merge target index vector
+, IndexT *a   // UNCHANGED: pointer to merge source index vector a
+, IndexT *b   // UNCHANGED: pointer to merge source index vector b
+, IndexT na   // number of elements in merge source vector a
+, IndexT nb   // number of elements in merge source vector b
+);
+
+// ascending merge for sortordering
+void ram_integer64_sortordermerge_asc(
+  ValueT *c     // RETURNED: pointer to merge target data vector
+, ValueT *a     // UNCHANGED: pointer to merge source data vector a
+, ValueT *b     // UNCHANGED: pointer to merge source data vector b
+, IndexT *ci    // RETURNED: pointer to merge target index vector
+, IndexT *ai    // UNCHANGED: pointer to merge source index vector a
+, IndexT *bi    // UNCHANGED: pointer to merge source index vector b
+, IndexT na     // number of elements in merge source vector a
+, IndexT nb     // number of elements in merge source vector b
+);
+
+// descending merge for sorting
+void ram_integer64_sortmerge_desc(
+  ValueT *c   // RETURNED: pointer to merge target data vector
+, ValueT *a   // UNCHANGED: pointer to merge source data vector a
+, ValueT *b   // UNCHANGED: pointer to merge source data vector b
+, IndexT na   // number of elements in merge source vector a
+, IndexT nb   // number of elements in merge source vector b
+);
+
+// descending merge for ordering
+void ram_integer64_ordermerge_desc(
+ValueT *data  // UNCHANGED: pointer to data vector
+, IndexT *c   // RETURNED: pointer to merge target index vector
+, IndexT *a   // UNCHANGED: pointer to merge source index vector a
+, IndexT *b   // UNCHANGED: pointer to merge source index vector b
+, IndexT na   // number of elements in merge source vector a
+, IndexT nb   // number of elements in merge source vector b
+);
+
+// descending merge for sortordering
+void ram_integer64_sortordermerge_desc(
+  ValueT *c     // RETURNED: pointer to merge target data vector
+, ValueT *a     // UNCHANGED: pointer to merge source data vector a
+, ValueT *b     // UNCHANGED: pointer to merge source data vector b
+, IndexT *ci    // RETURNED: pointer to merge target index vector
+, IndexT *ai    // UNCHANGED: pointer to merge source index vector a
+, IndexT *bi    // UNCHANGED: pointer to merge source index vector b
+, IndexT na     // number of elements in merge source vector a
+, IndexT nb     // number of elements in merge source vector b
+);
+
+
+// merge sorts b ascending and leaves result in a (following Sedgewick 8.4 Mergesort with no copying)
+void ram_integer64_mergesort_asc_rec(
+  ValueT *a   // RETURNED: pointer to target data vector 
+, ValueT *b   // MODIFIED: pointer to source data vector 
+, IndexT l    // leftmost position to be sorted
+, IndexT r    // rightmost position to be sorted
+);
+
+// merge sorting b ascending leaving result in a (following Sedgewick 8.4 Mergesort with no copying)
+void ram_integer64_mergeorder_asc_rec(
+ValueT *data  // UNCHANGED: pointer to data vector
+, IndexT *a   // RETURNED: pointer to target index vector 
+, IndexT *b   // MODIFIED: pointer to source index vector 
+, IndexT l    // leftmost position to be sorted
+, IndexT r    // rightmost position to be sorted
+);
+
+// merge sortordering b ascending leaving result in a (following Sedgewick 8.4 Mergesort with no copying)
+void ram_integer64_mergesortorder_asc_rec(
+  ValueT *a   // RETURNED: pointer to target data vector
+, ValueT *b   // MODIFIED: pointer to source data vector
+, IndexT *ai  // RETURNED: pointer to target index vector 
+, IndexT *bi  // MODIFIED: pointer to source index vector 
+, IndexT l    // leftmost position to be sorted
+, IndexT r    // rightmost position to be sorted
+);
+
+
+// merge sorts b descending and leaves result in a (following Sedgewick 8.4 Mergesort with no copying)
+void ram_integer64_mergesort_desc_rec(
+  ValueT *a   // RETURNED: pointer to target data vector 
+, ValueT *b   // MODIFIED: pointer to source data vector 
+, IndexT l    // leftmost position to be sorted
+, IndexT r    // rightmost position to be sorted
+);
+
+// merge sorting b descending leaving result in a (following Sedgewick 8.4 Mergesort with no copying)
+void ram_integer64_mergeorder_desc_rec(
+  ValueT *data  // UNCHANGED: pointer to data vector
+, IndexT *a   	// RETURNED: pointer to target index vector 
+, IndexT *b   	// MODIFIED: pointer to source index vector 
+, IndexT l    	// leftmost position to be sorted
+, IndexT r    	// rightmost position to be sorted
+);
+
+// merge sortordering b descending leaving result in a (following Sedgewick 8.4 Mergesort with no copying)
+void ram_integer64_mergesortorder_desc_rec(
+  ValueT *a   // RETURNED: pointer to target data vector
+, ValueT *b   // MODIFIED: pointer to source data vector
+, IndexT *ai  // RETURNED: pointer to target index vector 
+, IndexT *bi  // MODIFIED: pointer to source index vector 
+, IndexT l    // leftmost position to be sorted
+, IndexT r    // rightmost position to be sorted
+);
+
+
+// ascending partitioning of data between l and r around pivot in r for quick sorting
+IndexT ram_integer64_quicksortpart_asc_no_sentinels(
+  ValueT *data  // RETURNED: pointer to data
+, IndexT l      // leftmost position to be sorted
+, IndexT r      // rightmost position to be sorted
+);
+
+// ascending partitioning of data between l and r around pivot in r for quick ordering
+IndexT ram_integer64_quickorderpart_asc_no_sentinels(
+  ValueT *data  // UNCHANGED: pointer to data
+, IndexT *index	// RETURNED: pointer to index
+, IndexT l      // leftmost position to be sorted
+, IndexT r      // rightmost position to be sorted
+);
+
+// ascending partitioning of data between l and r around pivot in r for quick sortordering
+IndexT ram_integer64_quicksortorderpart_asc_no_sentinels(
+  ValueT *data  // RETURNED: pointer to data
+, IndexT *index	// RETURNED: pointer to index
+, IndexT l      // leftmost position to be sorted
+, IndexT r      // rightmost position to be sorted
+);
+
+// descending partitioning of data between l and r around pivot in r for quick sorting
+IndexT ram_integer64_quicksortpart_desc_no_sentinels(
+  ValueT *data  // RETURNED: pointer to data
+, IndexT l      // leftmost position to be sorted
+, IndexT r      // rightmost position to be sorted
+);
+
+// descending partitioning of data between l and r around pivot in r for quick ordering
+IndexT ram_integer64_quickorderpart_desc_no_sentinels(
+  ValueT *data  // UNCHANGED: pointer to data
+, IndexT *index	// RETURNED: pointer to index
+, IndexT l      // leftmost position to be sorted
+, IndexT r      // rightmost position to be sorted
+);
+
+// descending partitioning of data between l and r around pivot in r for quick sortordering
+IndexT ram_integer64_quicksortorderpart_desc_no_sentinels(
+  ValueT *data  // RETURNED: pointer to data
+, IndexT *index	// RETURNED: pointer to index
+, IndexT l      // leftmost position to be sorted
+, IndexT r      // rightmost position to be sorted
+);
+
+
+// ascending quick sorting
+void ram_integer64_quicksort_asc_mdr3_no_sentinels(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// ascending quick sortordering
+void ram_integer64_quicksortorder_asc_mdr3_no_sentinels(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// ascending quick sortordering
+void ram_integer64_quickorder_asc_mdr3_no_sentinels(
+  ValueT *data    // UNCHANGED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// descending quick sorting
+void ram_integer64_quicksort_desc_mdr3_no_sentinels(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// descending quick sortordering
+void ram_integer64_quicksortorder_desc_mdr3_no_sentinels(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+// descending quick sortordering
+void ram_integer64_quickorder_desc_mdr3_no_sentinels(
+  ValueT *data    // UNCHANGED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+);
+
+
+// ascending intro sorting (switches to shellsort when no restlevels left)
+void ram_integer64_quicksort_asc_intro(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+, int restlevel	  // number of remaining levels for quicksort recursion before switching to shellsort
+);
+
+// ascending intro sortordering (switches to shellsort when no restlevels left)
+void ram_integer64_quicksortorder_asc_intro(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+, int restlevel	  // number of remaining levels for quicksort recursion before switching to shellsort
+);
+
+// ascending intro sortordering (switches to shellsort when no restlevels left)
+void ram_integer64_quickorder_asc_intro(
+  ValueT *data    // UNCHANGED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+, int restlevel	  // number of remaining levels for quicksort recursion before switching to shellsort
+);
+
+// descending intro sorting (switches to shellsort when no restlevels left)
+void ram_integer64_quicksort_desc_intro(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+, int restlevel	  // number of remaining levels for quicksort recursion before switching to shellsort
+);
+
+// descending intro sortordering (switches to shellsort when no restlevels left)
+void ram_integer64_quicksortorder_desc_intro(
+  ValueT *data    // RETURNED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+, int restlevel	  // number of remaining levels for quicksort recursion before switching to shellsort
+);
+
+// descending intro sortordering (switches to shellsort when no restlevels left)
+void ram_integer64_quickorder_desc_intro(
+  ValueT *data    // UNCHANGED: pointer to data vector
+, IndexT *index   // RETURNED: pointer to index vector
+, IndexT l        // leftmost position to be sorted
+, IndexT r        // rightmost position to be sorted
+, int restlevel	  // number of remaining levels for quicksort recursion before switching to shellsort
+);
+
+
+// LSB radix sorting
+void ram_integer64_radixsort(
+  UValueT * data        // RETURNED: pointer to data vector coerced to unsigned
+, UValueT * auxdata     // MODIFIED: pointer to auxilliary data vector coerced to unsigned
+, IndexT * stats        // MODIFIED: pointer to counting vector with nradixes*(pow(2, radixbits)+1) elements
+, IndexT ** pstats      // MODIFIED: pointer to vector of pointers with nradixes elements
+, IndexT n              // number of elements in data and auxdata
+, int nradixes          // number of radixes where nradixes*radixbits==total number of bits
+, int radixbits         // number of bits in radix where nradixes*radixbits==total number of bits
+, Rboolean decreasing   // one of {0=ascending, 1=descending}
+);
+
+// LSB radix ordering
+void ram_integer64_radixorder(
+  UValueT * data          // UNCHANGED: pointer to data vector
+, IndexT * index          // RETURNED: pointer to index vector
+, IndexT * auxindex       // MODIFIED: pointer to auxilliary index vector
+, IndexT * stats          // MODIFIED: pointer to counting vector with nradixes*(pow(2, radixbits)+1) elements
+, IndexT ** pstats        // MODIFIED: pointer to vector of pointers with nradixes elements
+, IndexT n                // number of elements in data and auxdata
+, int nradixes            // number of radixes where nradixes*radixbits==total number of bits
+, int radixbits           // number of bits in radix where nradixes*radixbits==total number of bits
+, Rboolean decreasing     // one of {0=ascending, 1=descending}
+);
+
+// LSB radix sortordering
+void ram_integer64_radixsortorder(
+  UValueT * data          // RETURNED: pointer to data vector coerced to unsigned
+, UValueT * auxdata       // MODIFIED: pointer to auxilliary data vector coerced to unsigned
+, IndexT * index          // RETURNED: pointer to index vector
+, IndexT * auxindex       // MODIFIED: pointer to auxilliary index vector
+, IndexT * stats          // MODIFIED: pointer to counting vector with nradixes*(pow(2, radixbits)+1) elements
+, IndexT ** pstats        // MODIFIED: pointer to vector of pointers with nradixes elements
+, IndexT n                // number of elements in data and auxdata
+, int nradixes            // number of radixes where nradixes*radixbits==total number of bits
+, int radixbits           // number of bits in radix where nradixes*radixbits==total number of bits
+, Rboolean decreasing     // one of {0=ascending, 1=descending}
+);
+
+#endif
+
+/*****************************************************************************/
+/**                                                                         **/
+/**                                EOF                                      **/
+/**                                                                         **/
+/*****************************************************************************/
diff --git a/src/sortuse64.c b/src/sortuse64.c
new file mode 100644
index 0000000..d245d73
--- /dev/null
+++ b/src/sortuse64.c
@@ -0,0 +1,1302 @@
+/*
+# C-Code for searching and merging
+# S3 atomic 64bit integers for R
+# (c) 2011 Jens Oehlschägel
+# Licence: GPL2
+# Provided 'as is', use at your own risk
+# Created: 2011-12-11
+# Last changed:  2011-12-11
+*/
+
+#include <R.h>
+#include <Rdefines.h>
+//#include <Rinternals.h>
+
+#include "integer64.h"
+#include "bsearch.h"
+
+void R_Busy (int which);
+
+// Count NA values in an integer64 vector.
+// x_  REALSXP vector whose doubles are reinterpreted as 64-bit integers
+// returns a scalar INTSXP: number of elements equal to NA_INTEGER64
+SEXP r_ram_integer64_nacount(
+  SEXP x_
+)
+{
+  int i,n = LENGTH(x_);
+  ValueT *x = (ValueT *) REAL(x_);
+  SEXP ret_;
+  PROTECT( ret_ = allocVector(INTSXP, 1) );
+  int ret = 0;
+  if (n){
+	R_Busy(1);
+	for(i=0;i<n;i++)
+		if (x[i]==NA_INTEGER64)
+			ret++;
+	/* balanced with R_Busy(1): previously R_Busy(0) was called even on
+	   the n==0 path where the busy state was never entered */
+	R_Busy(0);
+  }  
+  INTEGER(ret_)[0]=ret;
+  UNPROTECT(1);
+  return ret_;
+}
+
+
+// Check whether an integer64 vector is sorted in ascending order.
+// x_  REALSXP vector whose doubles are reinterpreted as 64-bit integers
+// returns a scalar LGLSXP: TRUE iff x[i-1] <= x[i] for all i
+SEXP r_ram_integer64_issorted_asc(
+  SEXP x_
+)
+{
+  int i,n = LENGTH(x_);
+  ValueT *x = (ValueT *) REAL(x_);
+  SEXP ret_;
+  PROTECT( ret_ = allocVector(LGLSXP, 1) );
+  Rboolean ret = TRUE;
+  if (n){
+	R_Busy(1);
+	for(i=1;i<n;i++)
+		if (x[i]<x[i-1]){
+			ret = FALSE;
+			break;	/* break suffices; the goto/label pair is unnecessary */
+		}
+	R_Busy(0);	/* balanced: only switch busy off if it was switched on */
+  }  
+  /* LOGICAL(), not INTEGER(): ret_ is an LGLSXP and current R versions
+     type-check the accessor macros */
+  LOGICAL(ret_)[0]=ret;
+  UNPROTECT(1);
+  return ret_;
+}
+
+// Count unique values and tied values in an already-sorted integer64 vector.
+// sorted_  REALSXP (sorted ascending or descending: only equality of
+//          adjacent runs matters) reinterpreted as int64
+// ret_     input value ignored; a fresh INTSXP(2) is allocated and returned
+//          with [0]=nunique and [1]=nties, where nties is the total number
+//          of elements belonging to runs of equal values longer than 1
+SEXP r_ram_integer64_sortnut(
+  SEXP sorted_            /* somehow sorted table vector */
+, SEXP ret_
+)
+{
+  int i,lasti,ities,nties=0,nunique=0,n = LENGTH(sorted_);
+  ValueT *sorted = (ValueT *) REAL(sorted_);
+  PROTECT( ret_ = allocVector(INTSXP, 2) );
+  if (n){
+	R_Busy(1);
+	nunique=1;
+	lasti = 0;	/* start position of the current run of equal values */
+	for(i=1;i<n;i++){
+		if (sorted[i]!=sorted[lasti]){
+			ities = i - lasti;	/* length of the run that just ended */
+			if (ities>1)
+				nties += ities;	/* every member of a run >1 counts as a tie */
+			nunique++;
+			lasti=i;
+		}
+	}
+	/* the final run is never closed inside the loop: count it here */
+	if (lasti<(n-1))
+		nties += n - lasti;
+	R_Busy(0);
+  }  
+  INTEGER(ret_)[0]=nunique;
+  INTEGER(ret_)[1]=nties;
+  UNPROTECT(1);
+  return ret_;
+}
+
+// Count unique values and tied values of an integer64 vector accessed
+// through an ordering permutation (order-based analogue of sortnut).
+// table_  unsorted data vector, REALSXP reinterpreted as int64
+// order_  1-based permutation (R order semantics) that sorts table_
+// ret_    input value ignored; a fresh INTSXP(2) is allocated and returned
+//         with [0]=nunique and [1]=nties
+SEXP r_ram_integer64_ordernut(
+  SEXP table_
+, SEXP order_
+, SEXP ret_
+)
+{
+  int i,lasti,ities,nties=0,nunique=0,n = LENGTH(table_);
+  ValueT *table;
+  table = (ValueT *) REAL(table_);
+  IndexT *index = INTEGER(order_);
+  PROTECT( ret_ = allocVector(INTSXP, 2) );
+  if (n){
+	R_Busy(1);
+	nunique=1;
+	lasti = 0;	/* start (in order positions) of the current run of equal values */
+	for(i=1;i<n;i++){
+		/* index[] is 1-based, hence the -1 on every table access */
+		if (table[index[i]-1]!=table[index[lasti]-1]){
+			ities = i - lasti;	/* length of the run that just ended */
+			if (ities>1)
+				nties += ities;
+			nunique++;
+			lasti=i;
+		}
+	}
+	/* the final run is never closed inside the loop: count it here */
+	if (lasti<(n-1))
+		nties += n - lasti;
+	R_Busy(0);
+  }  
+  INTEGER(ret_)[0]=nunique;
+  INTEGER(ret_)[1]=nties;
+  UNPROTECT(1);
+  return ret_;
+}
+
+
+
+/* Membership test ("x %in% sorted") of x_ against an ascending-sorted table.
+   method 1: independent binary search per element of x_
+   method 2: doubling (exponential) search walking forward through sorted_
+             -- presumably assumes x_ itself is sorted ascending; TODO confirm
+             against the R-level caller
+   method 3: linear merge scan -- same assumption on x_
+   ret_ is a caller-allocated LGLSXP of LENGTH(x_), filled in place.
+   NOTE(review): methods 3 reads sorted[0] unconditionally when n>0, so an
+   empty table with non-empty x_ looks out of bounds -- verify callers
+   guarantee nt>0. */
+SEXP r_ram_integer64_sortfin_asc(
+  SEXP x_            /* data vector */
+, SEXP sorted_            /* sorted table vector */
+, SEXP method_
+, SEXP ret_
+)
+{
+  int i,n = LENGTH(x_);
+  int pos,nt = LENGTH(sorted_);
+  int n1 = nt-1;
+  int method = asInteger(method_);
+
+  ValueT *data;
+  data = (ValueT *) REAL(x_);
+  ValueT *sorted;
+  sorted = (ValueT *) REAL(sorted_);
+
+  int *ret = LOGICAL(ret_);
+
+  R_Busy(1);
+  DEBUG_INIT
+  
+  switch (method){
+    case 1:{
+		/* plain binary search: negative return means "not found" */
+		for(i=0;i<n;i++)
+			ret[i] = integer64_bsearch_asc_EQ(sorted, 0, n1, data[i])<0 ? FALSE : TRUE;
+		break;
+	}
+    case 2:{
+	    pos = 0;
+		for(i=0;i<n;i++){
+			/* resume search at the previous hit position */
+			pos = integer64_lsearch_asc_GE(sorted, pos, n1, data[i]);
+			if (pos>n1){
+			  /* table exhausted: all remaining x are absent */
+			  for (;i<n;i++)
+			    ret[i] = FALSE;
+			}else{
+				ret[i] = data[i]==sorted[pos] ? TRUE : FALSE;
+			}
+		}
+		break;
+	}
+    case 3:{
+	  pos = 0;
+	  for (i=0;i<n;i++){
+		/* advance table cursor until sorted[pos] >= data[i] */
+		while(LESS(sorted[pos], data[i])){
+		  pos++;
+		  if (pos==nt){
+			/* table exhausted: all remaining x are absent */
+			for (;i<n;i++)
+			  ret[i] = FALSE;
+			goto wrapup;
+		  }
+		}
+		ret[i] = data[i]==sorted[pos] ? TRUE : FALSE;
+	  }
+	  break;
+	}
+    default:
+	  method=0;	/* remembered so we can error() after R_Busy(0) */
+  }	
+
+wrapup:  
+	  R_Busy(0);
+  if (method==0)
+    error("unimplemented method");
+  return ret_;
+}
+
+/* Membership test ("x %in% table") where the table is accessed through an
+   ordering permutation instead of being physically sorted.
+   Methods are as in r_ram_integer64_sortfin_asc (1=binary, 2=doubling,
+   3=merge scan; 2 and 3 presumably require x_ sorted -- TODO confirm).
+   The 1-based R order is decremented to 0-based in place for the search
+   helpers and restored before returning (both the normal and the goto
+   path pass through the restore loop). */
+SEXP r_ram_integer64_orderfin_asc(
+  SEXP x_            /* data vector */
+, SEXP table_            /* table vector */
+, SEXP order_            /* order vector that makes table_ sorted */
+, SEXP method_
+, SEXP ret_
+)
+{
+  int i,n = LENGTH(x_);
+  int pos,nt = LENGTH(table_);
+  int n1 = nt-1;
+  int method = asInteger(method_);
+
+  ValueT *data;
+  data = (ValueT *) REAL(x_);
+  ValueT *table;
+  table = (ValueT *) REAL(table_);
+  IndexT *index = INTEGER(order_);
+
+  int *ret = LOGICAL(ret_);
+
+  R_Busy(1);
+  DEBUG_INIT
+
+  /* temporarily make the order 0-based for the osearch helpers */
+  for(i=0;i<nt;i++)
+    index[i]--;
+  
+  switch (method){
+    case 1:{
+		for(i=0;i<n;i++){
+			ret[i] = integer64_bosearch_asc_EQ(table, index, 0, n1, data[i])<0 ? FALSE : TRUE;
+		}
+		break;
+	}
+    case 2:{
+	    pos = 0;
+		for(i=0;i<n;i++){
+			/* resume search at the previous hit position */
+			pos = integer64_losearch_asc_GE(table, index, pos, n1, data[i]);
+			if (pos>n1){
+			  /* table exhausted: all remaining x are absent */
+			  for (;i<n;i++)
+			    ret[i] = FALSE;
+			}else{
+				ret[i] = data[i]==table[index[pos]] ? TRUE : FALSE;
+			}
+		}
+		break;
+	}
+    case 3:{
+	  pos = 0;
+	  for (i=0;i<n;i++){
+		/* advance cursor until table[index[pos]] >= data[i] */
+		while(LESS(table[index[pos]], data[i])){
+		  pos++;
+		  if (pos==nt){
+			for (;i<n;i++)
+			  ret[i] = FALSE;
+			goto wrapup;
+		  }
+		}
+		ret[i] = data[i]==table[index[pos]] ? TRUE : FALSE;
+	  }
+	  break;
+	}
+    default:
+	  method=0;	/* remembered so we can error() after cleanup */
+  }	
+
+wrapup:  
+  /* restore the caller's 1-based order */
+  for(i=0;i<nt;i++)
+    index[i]++;
+  
+	  R_Busy(0);
+  if (method==0)
+    error("unimplemented method");
+  return ret_;
+}
+
+
+
+/* match(): positions of x_ in table_, where table_ is accessed through an
+   ordering permutation. Returns 1-based positions into the ORIGINAL table
+   (index[pos]+1 after the temporary 0-based shift) or nomatch.
+   Methods as in sortfin/orderfin (1=binary, 2=doubling, 3=merge scan;
+   2 and 3 presumably require x_ sorted -- TODO confirm). The order is
+   shifted to 0-based in place and restored on every exit path. */
+SEXP r_ram_integer64_orderpos_asc(
+  SEXP x_            /* data vector */
+, SEXP table_            /* table vector */
+, SEXP order_            /* order vector that makes table_ sorted */
+, SEXP nomatch_
+, SEXP method_
+, SEXP ret_
+)
+{
+  int i,n = LENGTH(x_);
+  int pos,nt = LENGTH(table_);
+  int n1 = nt-1;
+  int method = asInteger(method_);
+  int nomatch = asInteger(nomatch_);
+
+  ValueT *data;
+  data = (ValueT *) REAL(x_);
+  ValueT *table;
+  table = (ValueT *) REAL(table_);
+  IndexT *index = INTEGER(order_);
+
+  int *ret = INTEGER(ret_);
+
+  R_Busy(1);
+  DEBUG_INIT
+
+  /* temporarily make the order 0-based for the osearch helpers */
+  for(i=0;i<nt;i++)
+    index[i]--;
+  
+  switch (method){
+    case 1:{
+		for(i=0;i<n;i++){
+			pos = integer64_bosearch_asc_EQ(table, index, 0, n1, data[i]);
+			/* +1 converts the shifted 0-based position back to R's 1-based */
+			ret[i] = pos<0 ? nomatch : index[pos]+ 1;
+		}
+		break;
+	}
+    case 2:{
+	    pos = 0;
+		for(i=0;i<n;i++){
+			pos = integer64_losearch_asc_GE(table, index, pos, n1, data[i]);
+			if (pos>n1){
+			  /* table exhausted: all remaining x are unmatched */
+			  for (;i<n;i++)
+			    ret[i] = nomatch;
+			}else{
+				ret[i] = data[i]==table[index[pos]] ? index[pos]+1 : nomatch;
+			}
+		}
+		break;
+	}
+    case 3:{
+	  pos = 0;
+	  for (i=0;i<n;i++){
+		while(LESS(table[index[pos]], data[i])){
+		  pos++;
+		  if (pos==nt){
+			for (;i<n;i++)
+			  ret[i] = nomatch;
+			goto wrapup;
+		  }
+		}
+		ret[i] = data[i]==table[index[pos]] ? index[pos]+1 : nomatch;
+	  }
+	  break;
+	}
+    default:
+	  method=0;	/* remembered so we can error() after cleanup */
+  }	
+
+wrapup:  
+  /* restore the caller's 1-based order */
+  for(i=0;i<nt;i++)
+    index[i]++;
+  
+	  R_Busy(0);
+  if (method==0)
+    error("unimplemented method");
+  return ret_;
+}
+
+/* 1= simple binary search of unsorted in sorted
+   2= double exponential search of sorted in sorted 
+   3= merge-search of sorted in sorted
+*/ 
+/* match(): positions of x_ in an ascending-sorted table, with a parallel
+   order_ vector giving for each sorted position its 1-based position in the
+   original (unsorted) table; index[pos] is therefore returned as-is.
+   Methods as documented above (1=binary, 2=doubling, 3=merge scan; 2 and 3
+   presumably require x_ sorted -- TODO confirm). */
+SEXP r_ram_integer64_sortorderpos_asc(
+  SEXP x_            /* data vector */
+, SEXP sorted_            /* sorted table vector */
+, SEXP order_            /* order vector that makes table_ sorted */
+, SEXP nomatch_
+, SEXP method_  
+, SEXP ret_
+)
+{
+  int i,n = LENGTH(x_);
+  int pos,nt = LENGTH(sorted_);
+  int n1 = nt-1;
+  int method = asInteger(method_);
+  int nomatch = asInteger(nomatch_);
+
+  ValueT *data;
+  data = (ValueT *) REAL(x_);
+  ValueT *sorted;
+  sorted = (ValueT *) REAL(sorted_);
+  IndexT *index = INTEGER(order_);
+
+  int *ret = INTEGER(ret_);
+
+  R_Busy(1);
+  DEBUG_INIT
+
+  switch (method){
+    case 1:{
+		for(i=0;i<n;i++){
+			pos = integer64_bsearch_asc_EQ(sorted, 0, n1, data[i]);
+			/* index[] is already 1-based here, no shift needed */
+			ret[i] = pos<0 ? nomatch : index[pos];
+		}
+		break;
+	}
+    case 2:{
+	    pos = 0;
+		for(i=0;i<n;i++){
+			pos = integer64_lsearch_asc_GE(sorted, pos, n1, data[i]);
+			if (pos>n1){
+			  /* table exhausted: all remaining x are unmatched */
+			  for (;i<n;i++)
+			    ret[i] = nomatch;
+			}else{
+				ret[i] = data[i]==sorted[pos] ? index[pos] : nomatch;
+			}
+		}
+		break;
+	}
+    case 3:{
+	  pos = 0;
+	  for (i=0;i<n;i++){
+		/* advance table cursor until sorted[pos] >= data[i] */
+		while(LESS(sorted[pos], data[i])){
+		  pos++;
+		  if (pos==nt){
+			for (;i<n;i++)
+			  ret[i] = nomatch;
+			goto wrapup;
+		  }
+		}
+		ret[i] = data[i]==sorted[pos] ? index[pos] : nomatch;
+	  }
+		break;
+	}
+    default:
+	  method=0;	/* remembered so we can error() after R_Busy(0) */
+  }	
+
+wrapup:  
+	  R_Busy(0);
+  if (method==0)
+    error("unimplemented method");
+  return ret_;
+}
+
+
+
+// Copy the distinct values of an already-sorted integer64 vector into ret_.
+// sorted_  REALSXP (sorted) reinterpreted as int64
+// ret_     caller-allocated REALSXP with room for the unique values;
+//          filled in place and returned
+SEXP r_ram_integer64_sortuni_asc(
+  SEXP sorted_            /* somehow sorted table vector */
+, SEXP ret_
+)
+{
+  int k, j, n = LENGTH(sorted_);
+  ValueT *src = (ValueT *) REAL(sorted_);
+  ValueT *dst = (ValueT *) REAL(ret_);
+  if (n){
+	  R_Busy(1);
+	  dst[0] = src[0];
+	  k = 0;	/* position of the value most recently written to dst */
+	  for(j=1; j<n; j++){
+		  if (src[j] != dst[k]){
+			  k++;
+			  dst[k] = src[j];
+		  }
+	  }
+	  R_Busy(0);
+  }
+  return ret_;
+}
+
+// Unique values of table_ returned in ORIGINAL (table) order, computed from
+// the sorted copy plus its 1-based order permutation: a bitmask marks, at
+// each value's original position, whether it is the first of its run in
+// sorted order; a final scan over the table emits the flagged values.
+// ret_ is caller-allocated with room for the unique values.
+SEXP r_ram_integer64_sortorderuni_asc(
+  SEXP table_            /* table vector */
+, SEXP sorted_            /* somehow sorted table vector */
+, SEXP order_            /* sorted table vector */
+, SEXP ret_
+)
+{
+  int i,pos,n = LENGTH(table_);
+  ValueT *table = (ValueT *) REAL(table_);
+  ValueT *sorted = (ValueT *) REAL(sorted_);
+  IndexT *index = INTEGER(order_);
+  ValueT * ret = (ValueT *) REAL(ret_);
+  ValueT lastval;
+  if (n){
+	  R_Busy(1);
+	  /* one bit per table position, packed into 64-bit words */
+	  IndexT nbitflags = n/BITS_INTEGER64+(n%BITS_INTEGER64 ? 1 : 0);
+	  ValueT *bitflags;
+	  bitflags = (ValueT *) R_alloc(nbitflags, sizeof(ValueT));
+	  for (i=0;i<nbitflags;i++)
+		bitflags[i]=0;
+	  /* the first sorted element always starts a new run */
+	  lastval = sorted[0]; 
+	  bitflags[(index[0]-1)/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << ((index[0]-1) % BITS_INTEGER64));
+	  for(i=1;i<n;i++)
+	    if (sorted[i]!=lastval){
+			/* flag the original position (index[i]-1) of each run head */
+			bitflags[(index[i]-1)/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << ((index[i]-1) % BITS_INTEGER64));
+			lastval = sorted[i];
+		}
+	  /* emit flagged values in original table order */
+	  pos = 0;
+	  for(i=0;i<n;i++)
+		if ((bitflags[i/BITS_INTEGER64] & (RIGHTBIT_INTEGER64 << (i % BITS_INTEGER64))))
+			ret[pos++] = table[i];
+	  R_Busy(0);
+  }
+  return ret_;
+}
+
+
+// Unique values of table_ computed via its order permutation.
+// keep_order_ TRUE:  uniques are returned in ORIGINAL table order, using a
+//                    bitmask of first-occurrence positions (two passes).
+// keep_order_ FALSE: uniques are returned in SORTED order (single pass
+//                    over the permutation).
+// ret_ is caller-allocated with room for the unique values.
+SEXP r_ram_integer64_orderuni_asc(
+  SEXP table_            /* sorted table vector */
+, SEXP order_            /* sorted table vector */
+, SEXP keep_order_            /* sorted table vector */
+, SEXP ret_
+)
+{
+  int i,pos,n = LENGTH(table_);
+  ValueT *table = (ValueT *) REAL(table_);
+  IndexT *index = INTEGER(order_);
+  ValueT * ret = (ValueT *) REAL(ret_);
+  ValueT val, lastval;
+  if (n){
+	  R_Busy(1);
+	  if (asLogical(keep_order_)){
+	      /* one bit per table position, packed into 64-bit words */
+	      IndexT nbitflags = n/BITS_INTEGER64+(n%BITS_INTEGER64 ? 1 : 0);
+		  ValueT *bitflags;
+		  bitflags = (ValueT *) R_alloc(nbitflags, sizeof(ValueT));
+		  for (i=0;i<nbitflags;i++)
+		    bitflags[i]=0;
+		  /* flag the original position of each run head in sorted order */
+		  lastval = table[index[0]-1];
+		  bitflags[(index[0]-1)/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << ((index[0]-1) % BITS_INTEGER64));
+		  for(i=1;i<n;i++){
+		    pos = index[i]-1;
+			if (table[pos]!=lastval){
+				bitflags[pos/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << (pos % BITS_INTEGER64));
+				lastval = table[pos];
+			}
+		  }
+		  /* emit flagged values in original table order */
+		  pos = 0;
+		  for(i=0;i<n;i++)
+			if ((bitflags[i/BITS_INTEGER64] & (RIGHTBIT_INTEGER64 << (i % BITS_INTEGER64))))
+				ret[pos++] = table[i];
+	  }else{
+		  /* sorted order: emit the first value of each run while walking
+		     the permutation */
+		  lastval = table[index[0]-1];
+		  ret[0] = lastval;
+		  pos=1;
+		  for(i=1;i<n;i++){
+			val = table[index[i]-1];
+			if (val!=lastval){
+				ret[pos++] = val;
+				lastval = val;
+			}
+		  }
+	  }
+	  R_Busy(0);
+  }
+  return ret_;
+}
+
+
+// Unique POSITIONS (1-based, into the original table) from a sorted copy
+// plus its order permutation.
+// keep_order_ TRUE:  positions returned ascending (original table order),
+//                    via a bitmask of first-occurrence positions.
+// keep_order_ FALSE: positions returned in sorted-value order (index[i] of
+//                    each run head).
+// ret_ is a caller-allocated INTSXP with room for the unique positions.
+SEXP r_ram_integer64_sortorderupo_asc(
+  SEXP sorted_            /* somehow sorted table vector */
+, SEXP order_            /* sorted table vector */
+, SEXP keep_order_            /* sorted table vector */
+, SEXP ret_
+)
+{
+  int i,pos,n = LENGTH(sorted_);
+  ValueT *sorted = (ValueT *) REAL(sorted_);
+  IndexT *index = INTEGER(order_);
+  IndexT *ret = INTEGER(ret_);
+  ValueT lastval;
+  
+  if (n){
+	R_Busy(1);
+	if (asLogical(keep_order_)){
+		/* one bit per table position, packed into 64-bit words */
+		IndexT nbitflags = n/BITS_INTEGER64+(n%BITS_INTEGER64 ? 1 : 0);
+		ValueT *bitflags;
+		bitflags = (ValueT *) R_alloc(nbitflags, sizeof(ValueT));
+		for (i=0;i<nbitflags;i++)
+			bitflags[i]=0;
+		lastval = sorted[0];
+		bitflags[(index[0]-1)/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << ((index[0]-1) % BITS_INTEGER64));
+		for(i=1;i<n;i++)
+			if (sorted[i]!=lastval){
+				/* flag the original position of each run head */
+				bitflags[(index[i]-1)/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << ((index[i]-1) % BITS_INTEGER64));
+				lastval = sorted[i];
+			}
+		/* emit 1-based positions of flagged bits in ascending order */
+		pos = 0;
+		for(i=0;i<n;i++)
+			if ((bitflags[i/BITS_INTEGER64] & (RIGHTBIT_INTEGER64 << (i % BITS_INTEGER64))))
+				ret[pos++] = i+1;
+	}else{
+		/* sorted order: emit index[] of each run head */
+		ret[0] = index[0];
+		pos=1;
+		for(i=1;i<n;i++){
+			if (sorted[i]!=sorted[i-1]){
+				ret[pos++] = index[i];
+			}
+		}
+	}
+	R_Busy(0);
+  }
+  return ret_;
+}
+
+
+// Unique POSITIONS (1-based) computed via the order permutation only
+// (order-based analogue of sortorderupo).
+// keep_order_ TRUE:  positions returned ascending, via a bitmask.
+// keep_order_ FALSE: positions returned in sorted-value order.
+// ret_ is a caller-allocated INTSXP with room for the unique positions.
+SEXP r_ram_integer64_orderupo_asc(
+  SEXP table_            /* sorted table vector */
+, SEXP order_            /* sorted table vector */
+, SEXP keep_order_            /* sorted table vector */
+, SEXP ret_
+)
+{
+  int i,pos,n = LENGTH(table_);
+  ValueT *table = (ValueT *) REAL(table_);
+  IndexT *index = INTEGER(order_);
+  IndexT * ret = INTEGER(ret_);
+  ValueT lastval;
+  if (n){
+	  R_Busy(1);
+	  if (asLogical(keep_order_)){
+	      /* one bit per table position, packed into 64-bit words */
+	      IndexT nbitflags = n/BITS_INTEGER64+(n%BITS_INTEGER64 ? 1 : 0);
+		  ValueT *bitflags;
+		  bitflags = (ValueT *) R_alloc(nbitflags, sizeof(ValueT));
+		  for (i=0;i<nbitflags;i++)
+		    bitflags[i]=0;
+		  lastval = table[index[0]-1];
+		  bitflags[(index[0]-1)/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << ((index[0]-1) % BITS_INTEGER64));
+		  for(i=1;i<n;i++){
+		    pos = index[i]-1;
+			if (table[pos]!=lastval){
+				/* flag the original position of each run head */
+				bitflags[pos/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << (pos % BITS_INTEGER64));
+				lastval = table[pos];
+			}
+		  }
+		  /* emit 1-based positions of flagged bits in ascending order */
+		  pos = 0;
+		  for(i=0;i<n;i++)
+			if ((bitflags[i/BITS_INTEGER64] & (RIGHTBIT_INTEGER64 << (i % BITS_INTEGER64))))
+				ret[pos++] = i+1;
+	  }else{
+		  /* sorted order: emit index[] of each run head */
+		  ret[0] = index[0];
+		  pos=1;
+		  for(i=1;i<n;i++){
+			if ((table[index[i]-1])!=(table[index[i-1]-1])){
+				ret[pos++] = index[i];
+			}
+		  }
+	  }
+	  R_Busy(0);
+  }
+  return ret_;
+}
+
+
+
+
+// Run-length counts of an already-sorted integer64 vector (tabulate).
+// sorted_  REALSXP (sorted) reinterpreted as int64
+// ret_     caller-allocated INTSXP with room for one count per unique
+//          value; filled in place and returned
+SEXP r_ram_integer64_sorttab_asc(
+  SEXP sorted_            /* somehow sorted table vector */
+, SEXP ret_
+)
+{
+  int j, k, n = LENGTH(sorted_);
+  ValueT *vals = (ValueT *) REAL(sorted_);
+  IndexT *cnt = INTEGER(ret_);
+  if (n){
+	  R_Busy(1);
+	  k = 0;	/* index of the count currently being accumulated */
+	  cnt[0] = 1;
+	  for(j=1; j<n; j++){
+		  if (vals[j] == vals[j-1])
+			  cnt[k]++;	/* same run: extend the current count */
+		  else
+			  cnt[++k] = 1;	/* new value: open a fresh count */
+	  }
+	  R_Busy(0);
+  }
+  return ret_;
+}
+
+// Tabulate table_ via its order permutation. Three output modes:
+// denormalize_ TRUE:  ret_[original position] = size of that value's group
+//                     (one entry per element, length n).
+// keep_order_  TRUE:  one count per unique value, in ORIGINAL order of
+//                     first occurrence; ret_ is truncated via SET_LENGTH.
+// otherwise:          one count per unique value, in SORTED order.
+SEXP r_ram_integer64_ordertab_asc(
+  SEXP table_            /* sorted table vector */
+, SEXP order_            /* sorted table vector */
+, SEXP denormalize_            /* sorted table vector */
+, SEXP keep_order_            
+, SEXP ret_
+)
+{
+  int i,j,pos,n = LENGTH(table_);
+  ValueT *table = (ValueT *) REAL(table_);
+  IndexT *index = INTEGER(order_);
+  IndexT * ret = INTEGER(ret_);
+  int cnt;
+  if (n){
+	  PROTECT(ret_); /* because of R_Busy we need PROTECT, according to Tomas Kalibera */  
+	  R_Busy(1);
+	  if (asLogical(denormalize_)){
+	      /* j marks the first order position of the current group; once the
+	         group ends its size cnt is written back to every member's
+	         original position */
+	      j = 0;
+		  cnt=1;
+		  pos = index[j]-1;
+		  for(i=1;i<n;i++){
+			if (table[pos]!=table[index[i]-1]){
+				for (;j<i;j++)
+					ret[index[j]-1] = cnt;
+				cnt = 1;
+				j = i;
+				pos = index[j]-1;
+			}else{
+				cnt++;
+			}
+		  }
+		/* flush the final group */
+		for (;j<i;j++)
+			ret[index[j]-1] = cnt;
+	  }else if (asLogical(keep_order_)){
+		  /* first pass: accumulate each group's count at its first-occurrence
+		     position, zero elsewhere; second pass compacts the nonzeros */
+		  pos = index[0]-1;
+		  ret[pos] = 1;
+		  for(i=1;i<n;i++){
+		    j = index[i]-1;
+			if (table[pos]!=table[j]){
+				pos = j;
+				ret[pos] = 1;
+			}else{
+			    ret[pos]++;
+				ret[j]=0;
+			}
+		  }
+		  pos = 0;
+		  for(i=0;i<n;i++)
+		    if (ret[i])
+			  ret[pos++] = ret[i];
+		  /* NOTE(review): SET_LENGTH may reallocate ret_; relies on the
+		     caller holding the original -- verify this stays safe */
+		  SET_LENGTH(ret_, pos);
+	  }else{
+		  /* sorted order: j indexes the output count being accumulated */
+		  j = 0;
+		  ret[j] = 1;
+		  pos = index[j]-1;
+		  for(i=1;i<n;i++){
+			if (table[index[i]-1]!=table[pos]){
+				pos = index[i]-1;
+				ret[++j] = 1;
+			}else{
+			    ret[j]++;
+			}
+		  }
+	  }
+	  R_Busy(0);
+	  UNPROTECT(1);
+  }
+  return ret_;
+}
+
+
+// Tabulate a sorted copy via its order permutation. Two output modes:
+// denormalize_ TRUE:  ret_[original position] = size of that value's group
+//                     (one entry per element, length n).
+// otherwise:          one count per unique value in ORIGINAL order of first
+//                     occurrence; ret_ is truncated via SET_LENGTH.
+SEXP r_ram_integer64_sortordertab_asc(
+  SEXP sorted_            /* somehow sorted table vector */
+, SEXP order_            /* sorted table vector */
+, SEXP denormalize_            
+, SEXP ret_
+)
+{
+  int i,pos,n = LENGTH(sorted_);
+  ValueT *sorted = (ValueT *) REAL(sorted_);
+  IndexT *index = INTEGER(order_);
+  IndexT * ret = INTEGER(ret_);
+  int cnt;
+  if (n){
+          PROTECT(ret_); /* because of R_Busy we need PROTECT, according to Tomas Kalibera */
+	  R_Busy(1);
+	  if (asLogical(denormalize_)){
+			  /* pos marks the first sorted position of the current group;
+			     on group end its size cnt is written back to each member's
+			     original position */
+			  pos = 0;
+			  cnt = 1;
+			  for(i=1;i<n;i++){
+				if (sorted[i]!=sorted[pos]){
+					for (;pos<i;pos++)
+						ret[index[pos]-1] = cnt;
+					cnt = 1;
+					pos = i;
+				}else{
+					cnt++;
+				}
+			  }
+			/* flush the final group */
+			for (;pos<i;pos++)
+				ret[index[pos]-1] = cnt;
+	  }else{
+			  /* first pass: group count at first-occurrence position, zeros
+			     elsewhere; second pass compacts the nonzeros */
+			  pos = index[0]-1;
+			  ret[pos] = 1;
+			  for(i=1;i<n;i++){
+				if (sorted[i]!=sorted[i-1]){
+					pos = index[i]-1;
+					ret[pos] = 1;
+				}else{
+					ret[pos]++;
+					ret[index[i]-1]=0;
+				}
+			  }
+			  pos = 0;
+			  for(i=0;i<n;i++)
+				if (ret[i])
+				  ret[pos++] = ret[i];
+			  /* NOTE(review): SET_LENGTH may reallocate ret_; relies on the
+			     caller holding the original -- verify this stays safe */
+			  SET_LENGTH(ret_, pos);
+	  }
+	  R_Busy(0);
+          UNPROTECT(1);
+  }
+  return ret_;
+}
+
+
+// with na_skip_num==0 this is the proper version doing proper star schema modelling: 
+// NAs receive a key value like all other values, such that they can be joined with the dimension table
+// with na_skip_num==na_count this is the sick version needed for as.factor/as.ordered: 
+// NAs are propagated and can not be joined with the dimension table / NAs are not in levels
+// this breaks all previous consistent modelling
+// Dense integer keys (1..nunique) for table_ via its order permutation:
+// ret_[original position] = rank of that value among the distinct values.
+// The first na_skip_num order positions (leading NAs) get NA_INTEGER
+// instead of a key -- see the modelling note above this function.
+SEXP r_ram_integer64_orderkey_asc(
+  SEXP table_            /* sorted table vector */
+, SEXP order_            /* sorted table vector */
+, SEXP na_skip_num_     /* number of NAs to be skiped before keying, must be 0 or na_count */
+, SEXP ret_
+)
+{
+  int i,j,pos,n = LENGTH(table_);
+  ValueT *table = (ValueT *) REAL(table_);
+  IndexT *index = INTEGER(order_);
+  IndexT na_skip_num = asInteger(na_skip_num_);
+  IndexT * ret = INTEGER(ret_);
+  IndexT key;
+  if (n){
+	  R_Busy(1);
+		  /* leading NAs propagate as NA keys */
+		  for (i=0;i<na_skip_num;i++)
+			ret[index[i]-1] = NA_INTEGER;
+		  if (na_skip_num<n){
+			  key = 1;
+			  pos = index[na_skip_num]-1;	/* position of the current group's representative */
+			  ret[pos]= key;
+			  for(i=na_skip_num+1;i<n;i++){
+				j = index[i]-1;
+				if (table[pos]!=table[j]){
+					pos = j;
+					key++;	
+				}
+				ret[j] = key;
+			  }
+		  }
+	  R_Busy(0);
+  }
+  return ret_;
+}
+
+
+// Dense integer keys (1..nunique) computed from a sorted copy plus its
+// order permutation: ret_[original position] = rank of that value among the
+// distinct values. The first na_skip_num order positions (leading NAs) get
+// NA_INTEGER instead of a key.
+SEXP r_ram_integer64_sortorderkey_asc(
+  SEXP sorted_            /* somehow sorted table vector */
+, SEXP order_            /* sorted table vector */
+, SEXP na_skip_num_     /* number of NAs to be skiped before keying, must be 0 or na_count */
+, SEXP ret_
+)
+{
+  int j,n = LENGTH(sorted_);
+  ValueT *vals = (ValueT *) REAL(sorted_);
+  IndexT *ord = INTEGER(order_);
+  IndexT nskip = asInteger(na_skip_num_);
+  IndexT *key = INTEGER(ret_);
+  IndexT k;
+  if (n){
+	R_Busy(1);
+	/* leading NAs propagate as NA keys */
+	for (j=0; j<nskip; j++)
+		key[ord[j]-1] = NA_INTEGER;
+	if (nskip < n){
+		k = 1;
+		key[ord[nskip]-1] = k;
+		for (j=nskip+1; j<n; j++){
+			/* a change between adjacent sorted values opens a new key */
+			if (vals[j] != vals[j-1])
+				k++;
+			key[ord[j]-1] = k;
+		}
+	}
+	R_Busy(0);
+  }
+  return ret_;
+}
+
+
+// Average-rank ("rnk") computation for an UNSORTED integer64 vector given its
+// order permutation: ties receive the average of their ranks; the first
+// nacount order entries get NA_REAL. Returns ret_.
+SEXP r_ram_integer64_orderrnk_asc(
+  SEXP table_            /* unsorted data vector */
+, SEXP order_            /* 1-based ascending order permutation of table_, NAs first */
+, SEXP nacount_          /* number of leading NAs in order_ */
+, SEXP ret_              /* preallocated REAL vector of LENGTH(table_) */
+)
+{
+  int i,j,pos,n = LENGTH(table_);
+  ValueT *table = (ValueT *) REAL(table_);
+  IndexT *index = INTEGER(order_);
+  double * ret = REAL(ret_);
+  double avgrank;
+  int nacount = asInteger(nacount_);
+  int lasti;
+  if (n){
+	  R_Busy(1);
+		  for (i=0;i<nacount;i++)
+			ret[index[i]-1] = NA_REAL;
+		  index += nacount;
+		  n -= nacount;
+		  /* FIX(review): guard n>0 — if all values were NA the unguarded code
+		     read index[0] past the advanced pointer and wrote through it */
+		  if (n > 0){
+			  lasti = 0;
+			  /* pos: original position of the current run's first value */
+			  pos = index[0]-1;
+			  for(i=1;i<n;i++){
+				j = index[i]-1;
+				if (table[pos]!=table[j]){
+					pos = j;
+					/* close the tie-run [lasti, i-1]: all members get the
+					   average of ranks lasti+1 .. i */
+					avgrank = (lasti + 1 + i)/2.0;
+					for (j=i-1;j>=lasti;j--)
+					  ret[index[j]-1] = avgrank;
+					lasti = i;
+				}
+			  }
+			  /* flush the final tie-run */
+			  avgrank = (lasti + 1 + i)/2.0;
+			  for (j=i-1;j>=lasti;j--)
+				ret[index[j]-1] = avgrank;
+		  }
+	  R_Busy(0);
+  }
+  return ret_;
+}
+
+
+// Average-rank computation from an already-sorted vector plus its order
+// permutation: ties (runs of equal adjacent sorted values) receive the average
+// of their ranks, written to the original positions via order_; the first
+// nacount entries get NA_REAL. Returns ret_.
+SEXP r_ram_integer64_sortorderrnk_asc(
+  SEXP sorted_            /* sorted data vector (ascending, NAs first) */
+, SEXP order_            /* 1-based order permutation mapping sorted positions to original positions */
+, SEXP nacount_          /* number of leading NAs */
+, SEXP ret_              /* preallocated REAL vector of LENGTH(sorted_) */
+)
+{
+  int i,j,n = LENGTH(sorted_);
+  ValueT *sorted = (ValueT *) REAL(sorted_);
+  IndexT *index = INTEGER(order_);
+  double * ret = REAL(ret_);
+  double avgrank;
+  int nacount = asInteger(nacount_);
+  int lasti;
+  if (n){
+	  R_Busy(1);
+		  for (i=0;i<nacount;i++)
+			ret[index[i]-1] = NA_REAL;
+		  index += nacount;
+		  sorted += nacount;
+		  n -= nacount;
+		  /* FIX(review): guard n>0 — if all values were NA the unguarded
+		     tail write below accessed index[0] out of bounds */
+		  if (n > 0){
+			  lasti = 0;
+			  for(i=1;i<n;i++){
+				if (sorted[i]!=sorted[i-1]){
+					/* close the tie-run [lasti, i-1]: all members get the
+					   average of ranks lasti+1 .. i */
+					avgrank = (lasti + 1 + i)/2.0;
+					for (j=i-1;j>=lasti;j--)
+					  ret[index[j]-1] = avgrank;
+					lasti = i;
+				}
+			  }
+			  /* flush the final tie-run */
+			  avgrank = (lasti + 1 + i)/2.0;
+			  for (j=i-1;j>=lasti;j--)
+				ret[index[j]-1] = avgrank;
+		  }
+	  R_Busy(0);
+  }
+  return ret_;
+}
+
+
+
+
+// Mark duplicated positions of an unsorted integer64 vector given its order
+// permutation: ret[pos] = FALSE for the first occurrence of each value (in sort
+// order), TRUE for every later occurrence.
+// method 1: write the LOGICAL result directly (random-access writes).
+// method 2: collect first-occurrence flags in a packed bit vector first, then
+//           expand it sequentially into ret_.
+SEXP r_ram_integer64_orderdup_asc(
+  SEXP table_            /* unsorted data vector */
+, SEXP order_            /* 1-based ascending order permutation of table_ */
+, SEXP method_           /* integer scalar: 1 or 2, see above */
+, SEXP ret_              /* preallocated LOGICAL vector of LENGTH(table_) */
+)
+{
+  int i,pos,n = LENGTH(table_);
+  ValueT *table = (ValueT *) REAL(table_);
+  IndexT *index = INTEGER(order_);
+  int method = asInteger(method_);
+  int * ret = LOGICAL(ret_);
+  ValueT lastval;
+  if (n){
+	  R_Busy(1);
+	  switch (method){
+		case 1:{
+		  for (i=0;i<n;i++)
+			ret[i]=TRUE;
+		  /* lastval tracks the most recent distinct value in sort order */
+		  lastval = table[index[0]-1];
+		  ret[index[0]-1] = FALSE;
+		  for(i=1;i<n;i++){
+			pos = index[i]-1;
+			if (table[pos]!=lastval){
+				ret[pos] = FALSE;
+				lastval = table[pos];
+			}
+		  }
+		  break;
+		}
+		case 2:{
+		  /* one bit per element, packed into words of BITS_INTEGER64 bits */
+		  IndexT nbitflags = n/BITS_INTEGER64+(n%BITS_INTEGER64 ? 1 : 0);
+		  ValueT *bitflags;
+		  bitflags = (ValueT *) R_alloc(nbitflags, sizeof(ValueT));
+		  for (i=0;i<nbitflags;i++)
+			bitflags[i]=0;
+		  lastval = table[index[0]-1];
+		  /* a set bit means "first occurrence of a value" */
+		  bitflags[(index[0]-1)/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << ((index[0]-1) % BITS_INTEGER64));
+		  for(i=1;i<n;i++){
+			pos = index[i]-1;
+			if (table[pos]!=lastval){
+				bitflags[pos/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << (pos % BITS_INTEGER64));
+				lastval = table[pos];
+			}
+		  }
+		  /* expand: set bit (first occurrence) -> FALSE, unset -> TRUE (duplicate) */
+		  for(i=0;i<n;i++)
+			ret[i] = ((bitflags[i/BITS_INTEGER64] & (RIGHTBIT_INTEGER64 << (i % BITS_INTEGER64)))) ? FALSE : TRUE;
+		  break;
+		}
+		default:
+		  method=0;
+	  }	
+	  R_Busy(0);
+  }
+  /* NOTE(review): when n==0 an invalid method is not detected, since the
+     switch above (which resets method to 0) only runs for n>0 */
+  if (method==0)
+    error("unimplemented method");
+  return ret_;
+}
+
+
+// Mark duplicated positions from an already-sorted vector plus its order
+// permutation: ret[pos] = FALSE for the first occurrence of each value, TRUE
+// for every later occurrence. Like r_ram_integer64_orderdup_asc, but value
+// comparisons use adjacent sorted elements instead of indirect table lookups.
+// method 1: write the LOGICAL result directly; method 2: via packed bit vector.
+SEXP r_ram_integer64_sortorderdup_asc(
+  SEXP sorted_            /* sorted data vector (ascending, NAs first) */
+, SEXP order_            /* 1-based order permutation mapping sorted positions to original positions */
+, SEXP method_           /* integer scalar: 1 or 2, see above */
+, SEXP ret_              /* preallocated LOGICAL vector of LENGTH(sorted_) */
+)
+{
+  int i,n = LENGTH(sorted_);
+  ValueT *sorted = (ValueT *) REAL(sorted_);
+  IndexT *index = INTEGER(order_);
+  int method = asInteger(method_);
+  int * ret = LOGICAL(ret_);
+  if (n){
+	  R_Busy(1);
+	  switch (method){
+		case 1:{
+		  for (i=0;i<n;i++)
+			ret[i]=TRUE;
+		  ret[index[0]-1] = FALSE;
+		  for(i=1;i<n;i++){
+			/* a value change marks a first occurrence */
+			if (sorted[i]!=sorted[i-1]){
+				ret[index[i]-1] = FALSE;
+			}
+		  }
+		  break;
+		}
+		case 2:{
+		  /* one bit per element, packed into words of BITS_INTEGER64 bits */
+		  IndexT nbitflags = n/BITS_INTEGER64+(n%BITS_INTEGER64 ? 1 : 0);
+		  ValueT *bitflags;
+		  bitflags = (ValueT *) R_alloc(nbitflags, sizeof(ValueT));
+		  for (i=0;i<nbitflags;i++)
+			bitflags[i]=0;
+		  /* a set bit means "first occurrence of a value" */
+		  bitflags[(index[0]-1)/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << ((index[0]-1) % BITS_INTEGER64));
+		  for(i=1;i<n;i++){
+			if (sorted[i]!=sorted[i-1]){
+				bitflags[(index[i]-1)/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << ((index[i]-1) % BITS_INTEGER64));
+			}
+		  }
+		  /* expand: set bit (first occurrence) -> FALSE, unset -> TRUE (duplicate) */
+		  for(i=0;i<n;i++)
+			ret[i] = ((bitflags[i/BITS_INTEGER64] & (RIGHTBIT_INTEGER64 << (i % BITS_INTEGER64)))) ? FALSE : TRUE;
+		  break;
+		}
+		default:
+		  method=0;
+	  }	
+	  R_Busy(0);
+  }
+  /* NOTE(review): when n==0 an invalid method is not detected, since the
+     switch above only runs for n>0 */
+  if (method==0)
+    error("unimplemented method");
+  return ret_;
+}
+
+
+/* experimental: all origpos at which we have ties */
+// For each run of equal adjacent values in sorted_ that is longer than 1
+// (i.e. a tie), flag all its members' original positions via order_, then
+// emit the flagged 1-based positions in ascending order into ret_.
+SEXP r_ram_integer64_sortordertie_asc(
+  SEXP sorted_            /* sorted data vector */
+, SEXP order_            /* 1-based order permutation mapping sorted positions to original positions */
+, SEXP ret_              /* preallocated INTEGER vector receiving the tied positions */
+)
+{
+  int i,j,n = LENGTH(sorted_);
+  ValueT *sorted = (ValueT *) REAL(sorted_);
+  IndexT *index = INTEGER(order_);
+  IndexT * ret = INTEGER(ret_);
+  
+  if (n){
+	  R_Busy(1);
+	  /* one bit per original position, packed into words of BITS_INTEGER64 bits */
+	  IndexT nbitflags = n/BITS_INTEGER64+(n%BITS_INTEGER64 ? 1 : 0);
+	  ValueT *bitflags;
+	  bitflags = (ValueT *) R_alloc(nbitflags, sizeof(ValueT));
+	  for (i=0;i<nbitflags;i++)
+		bitflags[i]=0;
+	  /* j marks the start of the current run of equal sorted values */
+	  j = 0;
+	  for(i=1;i<n;i++)
+	    if (sorted[i]!=sorted[j]){
+			if (i>j+1){
+				/* run [j, i-1] longer than 1: flag all original positions in it
+				   (the loop also advances j to i, starting the next run) */
+				for (;j<i;j++)
+					bitflags[(index[j]-1)/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << ((index[j]-1) % BITS_INTEGER64));
+			}else{
+				j = i;
+			}
+		}
+			/* flush the final run if it is a tie */
+			if (i>j+1){
+				for (;j<i;j++)
+					bitflags[(index[j]-1)/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << ((index[j]-1) % BITS_INTEGER64));
+			}
+	  /* collect flagged positions as 1-based indices in ascending order */
+	  j = 0;
+	  for(i=0;i<n;i++)
+		if ((bitflags[i/BITS_INTEGER64] & (RIGHTBIT_INTEGER64 << (i % BITS_INTEGER64))))
+			ret[j++] = i+1;
+	  R_Busy(0);
+  }
+  return ret_;
+}
+
+
+// Like r_ram_integer64_sortordertie_asc, but table_ is the UNSORTED data
+// vector, so run boundaries are detected via indirect lookups table[index[i]-1].
+// Flags all original positions belonging to a tie (value occurring more than
+// once) and emits them as ascending 1-based indices into ret_.
+SEXP r_ram_integer64_ordertie_asc(
+  SEXP table_            /* unsorted data vector */
+, SEXP order_            /* 1-based ascending order permutation of table_ */
+, SEXP ret_              /* preallocated INTEGER vector receiving the tied positions */
+)
+{
+  int i,j,pos,n = LENGTH(table_);
+  ValueT *table = (ValueT *) REAL(table_);
+  IndexT *index = INTEGER(order_);
+  IndexT * ret = INTEGER(ret_);
+  if (n){
+	  R_Busy(1);
+	  /* one bit per original position, packed into words of BITS_INTEGER64 bits */
+	  IndexT nbitflags = n/BITS_INTEGER64+(n%BITS_INTEGER64 ? 1 : 0);
+	  ValueT *bitflags;
+	  bitflags = (ValueT *) R_alloc(nbitflags, sizeof(ValueT));
+	  for (i=0;i<nbitflags;i++)
+		bitflags[i]=0;
+	  /* j marks the start of the current run; pos is the original position of
+	     the run's first value */
+	  j = 0;
+	  pos = index[0]-1;
+	  for(i=1;i<n;i++)
+	    if (table[index[i]-1]!=table[pos]){
+			if (i>j+1){
+				/* run [j, i-1] longer than 1: flag all original positions in it */
+				for (;j<i;j++)
+					bitflags[(index[j]-1)/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << ((index[j]-1) % BITS_INTEGER64));
+			}else{
+				j = i;
+			}
+		pos = index[i]-1;
+		}
+			/* flush the final run if it is a tie */
+			if (i>j+1){
+				for (;j<i;j++)
+					bitflags[(index[j]-1)/BITS_INTEGER64] |= (RIGHTBIT_INTEGER64 << ((index[j]-1) % BITS_INTEGER64));
+			}
+	  /* collect flagged positions as 1-based indices in ascending order */
+	  j = 0;
+	  for(i=0;i<n;i++)
+		if ((bitflags[i/BITS_INTEGER64] & (RIGHTBIT_INTEGER64 << (i % BITS_INTEGER64))))
+			ret[j++] = i+1;
+
+	  R_Busy(0);
+  }
+  return ret_;
+}
+
+// Convert an ascending NA-first sorted integer64 vector into the requested
+// sort variant: na_last moves the NA block to the end, decreasing reverses the
+// runs of equal values while keeping ties in stable ascending-position order.
+SEXP r_ram_integer64_sortsrt(
+  SEXP x_            /* sorted data vector (ascending, NAs first) */
+, SEXP na_count_     /* integer scalar: number of leading NAs in x_ */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+, SEXP ret_   		 /* preallocated result vector of LENGTH(x_) */
+)
+{
+  R_Busy(1);
+  DEBUG_INIT
+  
+  int i,j,l,r,n = LENGTH(x_);
+  int na_count        = asInteger(na_count_);  /* FIX(review): a count, was mis-typed Rboolean */
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+  
+  ValueT *sorted;
+  sorted = (ValueT *) REAL(x_);
+  ValueT *ret;
+  ret = (ValueT *) REAL(ret_);
+  
+  /* copy the leading NA block to the front or the back of ret */
+  if (na_last){
+	for (i=0,j=n-na_count;i<na_count;i++,j++)
+		ret[j] = sorted[i];
+  }else{
+	for (i=0,j=0;i<na_count;i++,j++)
+		ret[j] = sorted[i];
+	ret += na_count;
+  }
+  sorted += na_count;
+  n = n - na_count;
+
+  /* FIX(review): n>0 guard — for an empty non-NA range the trailing run-copy
+     below executed once with i==-1 and read sorted[-1] out of bounds */
+  if (decreasing && n > 0){
+	  /* walk runs of equal values from the right, copying each run forward
+	     to keep ties in stable ascending-position order */
+	  for(l=n-2,r=n-1,j=0;l>=0;l--)
+	    if (sorted[l]!=sorted[r]){
+			for (i=l+1;i<=r;i++,j++)
+				ret[j] = sorted[i];
+			r=l;
+		}
+			/* final run [0, r] */
+			for (i=l+1;i<=r;i++,j++)
+				ret[j] = sorted[i];
+  }else{
+	for (i=0,j=0;i<n;i++,j++)
+		ret[j] = sorted[i];
+  }
+  
+  R_Busy(0);
+  return ret_;
+}
+
+// Reorder the order permutation index_ of an ascending NA-first sorted vector
+// x_ into the requested variant: na_last moves the NA index block to the end,
+// decreasing reverses runs of equal values while keeping ties stable.
+SEXP r_ram_integer64_sortorderord(
+  SEXP x_            /* sorted data vector (ascending, NAs first) */
+, SEXP index_        /* 1-based order permutation, NA positions first */
+, SEXP na_count_     /* integer scalar: number of leading NAs */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+, SEXP ret_   		 /* preallocated INTEGER vector of LENGTH(x_) */
+)
+{
+  R_Busy(1);
+  DEBUG_INIT
+  
+  int i,j,l,r,n = LENGTH(x_);
+  int na_count        = asInteger(na_count_);  /* FIX(review): a count, was mis-typed Rboolean */
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+  
+  ValueT *sorted;
+  sorted = (ValueT *) REAL(x_);
+  IndexT *index = INTEGER(index_);
+  IndexT *ret = INTEGER(ret_);
+  
+  /* copy the leading NA index block to the front or the back of ret */
+  if (na_last){
+	for (i=0,j=n-na_count;i<na_count;i++,j++)
+		ret[j] = index[i];
+  }else{
+	for (i=0,j=0;i<na_count;i++,j++)
+		ret[j] = index[i];
+	ret += na_count;
+  }
+  index += na_count;
+  n = n - na_count;
+
+  /* FIX(review): n>0 guard — for an empty non-NA range the trailing run-copy
+     below executed once with i==-1 and read index[-1] out of bounds */
+  if (decreasing && n > 0){
+	  sorted += na_count;
+	  /* walk runs of equal values from the right, copying each run forward
+	     to keep ties in stable ascending-position order */
+	  for(l=n-2,r=n-1,j=0;l>=0;l--)
+		if (sorted[l]!=sorted[r]){
+			for (i=l+1;i<=r;i++,j++)
+				ret[j] = index[i];
+			r=l;
+		}
+			/* final run [0, r] */
+			for (i=l+1;i<=r;i++,j++)
+				ret[j] = index[i];
+  }else{
+	for (i=0,j=0;i<n;i++,j++)
+		ret[j] = index[i];
+  }
+  
+  R_Busy(0);
+  return ret_;
+}
+
+// Like r_ram_integer64_sortorderord, but x_ is the UNSORTED data vector, so
+// values must be fetched through the (1-based) order permutation when finding
+// runs of equal values in the decreasing branch.
+SEXP r_ram_integer64_orderord(
+  SEXP x_            /* unsorted data vector */
+, SEXP index_        /* 1-based order permutation, NA positions first */
+, SEXP na_count_     /* integer scalar: number of leading NAs */
+, SEXP na_last_      /* logical scalar */
+, SEXP decreasing_   /* logical scalar */
+, SEXP ret_   		 /* preallocated INTEGER vector of LENGTH(x_) */
+)
+{
+  R_Busy(1);
+  DEBUG_INIT
+  
+  int i,j,l,r,n = LENGTH(x_);
+  int na_count        = asInteger(na_count_);  /* FIX(review): a count, was mis-typed Rboolean */
+  Rboolean na_last    = asLogical(na_last_);
+  Rboolean decreasing = asLogical(decreasing_);
+  
+  ValueT *data;
+  data = (ValueT *) REAL(x_);
+  IndexT *index = INTEGER(index_);
+  IndexT *ret = INTEGER(ret_);
+  
+  /* copy the leading NA index block to the front or the back of ret */
+  if (na_last){
+	for (i=0,j=n-na_count;i<na_count;i++,j++)
+		ret[j] = index[i];
+  }else{
+	for (i=0,j=0;i<na_count;i++,j++)
+		ret[j] = index[i];
+	ret += na_count;
+  }
+  index += na_count;
+  n = n - na_count;
+
+  /* FIX(review): n>0 guard — for an empty non-NA range the trailing run-copy
+     below executed once with i==-1 and read index[-1] out of bounds */
+  if (decreasing && n > 0){
+	  /* FIX(review): index[] holds 1-based positions into the FULL data vector,
+	     so values are data[index[.]-1] against the un-advanced data pointer
+	     (cf. r_ram_integer64_orderrnk_asc). The old code did data += na_count
+	     and read data[index[l]] without -1, comparing the wrong elements. */
+	  for(l=n-2,r=n-1,j=0;l>=0;l--)
+		if (data[index[l]-1]!=data[index[r]-1]){
+			for (i=l+1;i<=r;i++,j++)
+				ret[j] = index[i];
+			r=l;
+		}
+			/* final run [0, r] */
+			for (i=l+1;i<=r;i++,j++)
+				ret[j] = index[i];
+  }else{
+	for (i=0,j=0;i<n;i++,j++)
+		ret[j] = index[i];
+  }
+  
+  R_Busy(0);
+  return ret_;
+}
+
+

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/r-cran-bit64.git



More information about the debian-med-commit mailing list