[Pkg-javascript-commits] [node-leveldown] 103/492: added `compress` boolean on open()
Andrew Kelley
andrewrk-guest at moszumanska.debian.org
Sun Jul 6 17:13:49 UTC 2014
This is an automated email from the git hooks/post-receive script.
andrewrk-guest pushed a commit to annotated tag rocksdb-0.10.1
in repository node-leveldown.
commit f160b903625b5d0b170c7bfabc23dcb0ea7909a8
Author: Rod Vagg <rod at vagg.org>
Date: Sat Dec 15 11:05:55 2012 +1100
added `compress` boolean on open()
---
README.md | 21 +++++++++++---------
lib/levelup.js | 1 +
src/database.cc | 4 +++-
src/database_async.h | 2 ++
test/compression-test.js | 28 +++++++++++++++++++++++----
test/functional/compat-test.js | 44 +++++++++++++++++++++++-------------------
6 files changed, 66 insertions(+), 34 deletions(-)
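For context, the new option is used from JavaScript roughly like this (a sketch, not part of the patch; it assumes the callback form of `levelup()` described in the README excerpt below):

    var levelup = require('levelup')

    // Compression is on by default; pass compression: false at open() time
    // to store data uncompressed.
    levelup('/tmp/compression-example.db'
      , { createIfMissing: true, compression: false }
      , function (err, db) {
          if (err) throw err
          db.put('key', 'a highly compressible value', function (err) {
            if (err) throw err
            db.close()
          })
        })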
diff --git a/README.md b/README.md
index a945a31..05fd784 100644
--- a/README.md
+++ b/README.md
@@ -109,16 +109,18 @@ db.get('foo', function (err, value) {
`levelup()` takes an optional options object as its second argument; the following properties are accepted:
-* `createIfMissing` *(boolean)*: If `true`, will initialise an empty database at the specified location if one doesn't already exit. If `false` and a database doesn't exist you will receive an error in your `open()` callback and your database won't open. Defaults to `false`.
+* `'createIfMissing'` *(boolean, default: `false`)*: If `true`, will initialise an empty database at the specified location if one doesn't already exist. If `false` and a database doesn't exist you will receive an error in your `open()` callback and your database won't open.
-* `errorIfExists` *(boolean)*: If `true`, you will receive an error in your `open()` callback if the database exists at the specified location. Defaults to `false`.
+* `'errorIfExists'` *(boolean, default: `false`)*: If `true`, you will receive an error in your `open()` callback if the database exists at the specified location.
-* `encoding` *(string)*: The encoding of the keys and values passed through Node.js' `Buffer` implementation (see [Buffer#toString()](http://nodejs.org/docs/latest/api/buffer.html#buffer_buf_tostring_encoding_start_end))
+* `'compression'` *(boolean, default: `true`)*: If `true`, all *compressible* data will be run through the Snappy compression algorithm before being stored. Snappy is very fast, so you are unlikely to gain much speed by disabling it; leave this on unless you have a good reason to turn it off.
+
+* `'encoding'` *(string, default: `'utf8'`)*: The encoding of the keys and values passed through Node.js' `Buffer` implementation (see [Buffer#toString()](http://nodejs.org/docs/latest/api/buffer.html#buffer_buf_tostring_encoding_start_end))
<p><code>'utf8'</code> is the default encoding for both keys and values so you can simply pass in strings and expect strings from your <code>get()</code> operations. You can also pass <code>Buffer</code> objects as keys and/or values and conversion will be performed.</p>
<p>Supported encodings are: hex, utf8, ascii, binary, base64, ucs2, utf16le.</p>
<p><code>'json'</code> encoding is also supported, see below.</p>
-* `keyEncoding` and `valueEncoding` *(string)*: use instead of `encoding` to specify the exact encoding of both the keys and the values in this database.
+* `'keyEncoding'` and `'valueEncoding'` *(string, default: `'utf8'`)*: use instead of `encoding` to specify the exact encoding of both the keys and the values in this database.
Additionally, each of the main interface methods accepts an optional options object that can be used to override `encoding` (or `keyEncoding` & `valueEncoding`).
@@ -246,13 +248,13 @@ Additionally, you can supply an options object as the first parameter to `readSt
* `'end'`: the key you wish to end the read on. By default it will continue until the end of the store. Again, the *end* doesn't have to be an actual key as an (inclusive) `<=`-type operation is performed to detect the end. You can also use the `destroy()` method instead of supplying an `'end'` parameter to achieve the same effect.
-* `'reverse'`: a boolean, set to true if you want the stream to go in reverse order. Beware that due to the way LevelDB works, a reverse seek will be slower than a forward seek.
+* `'reverse'` *(boolean, default: `false`)*: set to `true` if you want the stream to go in reverse order. Beware that due to the way LevelDB works, a reverse seek will be slower than a forward seek.
-* `'keys'`: a boolean (defaults to `true`) to indicate whether the `'data'` event should contain keys. If set to `true` and `'values'` set to `false` then `'data'` events will simply be keys, rather than objects with a `'key'` property. Used internally by the `keyStream()` method.
+* `'keys'` *(boolean, default: `true`)*: whether the `'data'` event should contain keys. If set to `true` and `'values'` set to `false` then `'data'` events will simply be keys, rather than objects with a `'key'` property. Used internally by the `keyStream()` method.
-* `'values'`: a boolean (defaults to `true`) to indicate whether the `'data'` event should contain values. If set to `true` and `'keys'` set to `false` then `'data'` events will simply be values, rather than objects with a `'value'` property. Used internally by the `valueStream()` method.
+* `'values'` *(boolean, default: `true`)*: whether the `'data'` event should contain values. If set to `true` and `'keys'` set to `false` then `'data'` events will simply be values, rather than objects with a `'value'` property. Used internally by the `valueStream()` method.
-* `'limit'`: a number (defaults to -1) to limit the number of results collected by this stream. This number represents a *maximum* number of results and may not be reached if you get to the end of the store or your `'end'` value first. A value of -1 means there is no limit.
+* `'limit'` *(number, default: `-1`)*: limit the number of results collected by this stream. This number represents a *maximum* number of results and may not be reached if you get to the end of the store or your `'end'` value first. A value of `-1` means there is no limit.
--------------------------------------------------------
<a name="keyStream"></a>
@@ -321,7 +323,7 @@ The standard `write()`, `end()`, `destroy()` and `destroySoon()` methods are imp
Additionally, you can supply an options object as the first parameter to `writeStream()` with the following option:
-* `'useBatch'`: a boolean (defaults to `true`) if set to `false`, your *WriteStream* will avoid the use of `batch()` and use `put()` to write all data to the database. Since `batch()` is much quicker than multiple `put()` operations, you are advised to leave this as `true` unless you have a good reason to change it.
+* `'useBatch'` *(boolean, default: `true`)*: if set to `false`, your *WriteStream* will avoid the use of `batch()` and use `put()` to write all data to the database. Since `batch()` is much quicker than multiple `put()` operations, you are advised to leave this as `true` unless you have a good reason to change it.
#### Pipes and Node Stream compatibility
@@ -379,6 +381,7 @@ See the [CONTRIBUTING.md](https://github.com/rvagg/node-levelup/blob/master/CONT
* John Chesley - [GitHub/chesles](https://github.com/chesles/) - [Twitter/@chesles](https://twitter.com/chesles)
* Jake Verbaten - [GitHub/raynos](https://github.com/raynos) - [Twitter/@raynos2](https://twitter.com/Raynos2)
* Dominic Tarr - [GitHub/dominictarr](https://github.com/dominictarr) - [Twitter/@dominictarr](https://twitter.com/dominictarr)
+* Max Ogden - [GitHub/maxogden](https://github.com/maxogden) - [Twitter/@maxogden](http://twitter.com/maxogden)
<a name="licence"></a>
Licence & copyright
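As a quick illustration of the readStream options documented above (a sketch, not part of the patch; assumes an open `db`):

    // Stream the first 10 keys in reverse order, without values.
    db.readStream({ reverse: true, keys: true, values: false, limit: 10 })
      .on('data', function (key) { console.log('key:', key) })
      .on('end', function () { console.log('done') })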
diff --git a/lib/levelup.js b/lib/levelup.js
index b44e797..7ddabfb 100644
--- a/lib/levelup.js
+++ b/lib/levelup.js
@@ -16,6 +16,7 @@ var bridge = require('bindings')('levelup.node')
, encoding : 'utf8'
, keyEncoding : null
, valueEncoding : null
+ , compression : true
}
, encodingOpts = (function (enc) {
diff --git a/src/database.cc b/src/database.cc
index 12f4ee8..5df7d1c 100644
--- a/src/database.cc
+++ b/src/database.cc
@@ -23,6 +23,7 @@ using namespace leveldb;
LU_OPTION ( createIfMissing ); // for open()
LU_OPTION ( errorIfExists ); // for open()
+LU_OPTION ( compression ); // for open()
LU_OPTION ( sync ); // for write() and delete()
LU_STR ( key );
LU_STR ( value );
@@ -124,7 +125,8 @@ Handle<Value> Database::Open (const Arguments& args) {
, callback
, *location
, optionsObj->Has(option_createIfMissing) && optionsObj->Get(option_createIfMissing)->BooleanValue()
- , optionsObj->Has(option_errorIfExists) && optionsObj->Get(option_errorIfExists)->BooleanValue()
+ , optionsObj->Has(option_errorIfExists) && optionsObj->Get(option_errorIfExists)->BooleanValue()
+ , optionsObj->Has(option_compression) && optionsObj->Get(option_compression)->BooleanValue()
);
AsyncQueueWorker(worker);
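Note that `optionsObj->Has(x) && optionsObj->Get(x)->BooleanValue()` is `false` whenever the key is absent, so it is the `compression : true` default added to lib/levelup.js above that keeps Snappy on when the caller passes no options. In JavaScript terms the merge behaves roughly like this (hypothetical helper, not code from the patch):

    function mergeOpenOptions (userOptions) {
      var defaults = { createIfMissing: false, errorIfExists: false, compression: true }
        , merged   = {}
      Object.keys(defaults).forEach(function (k) { merged[k] = defaults[k] })
      Object.keys(userOptions || {}).forEach(function (k) { merged[k] = userOptions[k] })
      return merged
    }

    mergeOpenOptions({})                     // compression: true  -> Snappy
    mergeOpenOptions({ compression: false }) // compression: false -> no compression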
diff --git a/src/database_async.h b/src/database_async.h
index 5e95e62..62508df 100644
--- a/src/database_async.h
+++ b/src/database_async.h
@@ -21,12 +21,14 @@ public:
, string location
, bool createIfMissing
, bool errorIfExists
+ , bool compression
) : AsyncWorker(database, callback)
, location(location)
{
options = new Options();
options->create_if_missing = createIfMissing;
options->error_if_exists = errorIfExists;
+ options->compression = compression ? kSnappyCompression : kNoCompression;
};
virtual ~OpenWorker ();
diff --git a/test/compression-test.js b/test/compression-test.js
index fa15c06..5fcb141 100644
--- a/test/compression-test.js
+++ b/test/compression-test.js
@@ -11,15 +11,21 @@ var buster = require('buster')
, multiples = 10
, dataSize = compressableData.length * multiples
- , verify = function(db, done) {
+ , verify = function(compressed, db, done) {
du(db._location, function (err, size) {
refute(err)
//console.log(Math.round((size / dataSize) * 100) + '% compression ratio (', size, 'b vs', dataSize, 'b)')
- assert(size < dataSize, 'on-disk size (' + size + ') is less than data size (' + dataSize + ')')
+ if (compressed)
+ assert(size < dataSize, 'on-disk size (' + size + ') is less than data size (' + dataSize + ')')
+ else
+          assert(size >= dataSize, 'on-disk size (' + size + ') is not less than data size (' + dataSize + ')')
done()
})
}
+ , verifyCompressed = verify.bind(null, true)
+ , verifyNotCompressed = verify.bind(null, false)
+
buster.testCase('Compression', {
'setUp': common.readStreamSetUp
@@ -34,7 +40,21 @@ buster.testCase('Compression', {
, function (args, callback) {
db.put.apply(db, args.concat([callback]))
}
- , delayed(verify.bind(null, db, done), 0.1)
+ , delayed(verifyCompressed.bind(null, db, done), 0.1)
+ )
+ })
+ }
+
+  , 'test data is not compressed with compression=false on open() (db.put())': function (done) {
+ this.openTestDatabase({ createIfMissing: true, errorIfExists: true, compression: false }, function (db) {
+ async.forEach(
+ Array.apply(null, Array(multiples)).map(function (e, i) {
+ return [ i, compressableData ]
+ })
+ , function (args, callback) {
+ db.put.apply(db, args.concat([callback]))
+ }
+ , delayed(verifyNotCompressed.bind(null, db, done), 0.1)
)
})
}
@@ -45,7 +65,7 @@ buster.testCase('Compression', {
Array.apply(null, Array(multiples)).map(function (e, i) {
return { type: 'put', key: i, value: compressableData }
})
- , delayed(verify.bind(null, db, done), 0.1)
+ , delayed(verifyCompressed.bind(null, db, done), 0.1)
)
})
}
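The `verify.bind(null, true)` / `verify.bind(null, false)` pair above is plain partial application, and the tests then bind a second time, schematically:

    // Two layers of bind: the first pins the compressed flag, the second
    // pins db and done, leaving a zero-argument function for delayed()
    // (which defers the call so the writes can settle before du measures):
    var verifyCompressed = verify.bind(null, true)    // verify(true, db, done)
    var check = verifyCompressed.bind(null, db, done) // all arguments pinned
    delayed(check, 0.1)                               // run ~0.1s later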
diff --git a/test/functional/compat-test.js b/test/functional/compat-test.js
index 59b0675..eb89708 100644
--- a/test/functional/compat-test.js
+++ b/test/functional/compat-test.js
@@ -13,29 +13,33 @@ var async = require('async')
, dbtar = path.join(__dirname, 'test-data.db.tar')
, dblocation = path.join(__dirname, 'levelup_test_compat.db')
+function runTest (dbtar, callback) {
+ async.series([
+ // pre-clean
+ rimraf.bind(null, tarcommon.dblocation)
+ , rimraf.bind(null, dblocation)
+ , rimraf.bind(null, tarcommon.datadir)
+ // extract existing database
+ , tarcommon.extract.bind(null, dbtar, __dirname)
+ // extract data for comparison
+ , tarcommon.extract.bind(null, tarcommon.datatar, tarcommon.datadir)
+ // open database
+ , tarcommon.opendb.bind(null, dblocation)
+ // verify database entries are the same as the files
+ , tarcommon.verify
+ // clean up
+ , rimraf.bind(null, tarcommon.dblocation)
+ , rimraf.bind(null, dblocation)
+ , rimraf.bind(null, tarcommon.datadir)
+ ], callback)
+}
+
console.log('***************************************************')
console.log('RUNNING COMPAT-DATA-TEST...')
-async.series([
- // pre-clean
- rimraf.bind(null, tarcommon.dblocation)
- , rimraf.bind(null, dblocation)
- , rimraf.bind(null, tarcommon.datadir)
- // extract existing database
- , tarcommon.extract.bind(null, dbtar, __dirname)
- // extract data for comparison
- , tarcommon.extract.bind(null, tarcommon.datatar, tarcommon.datadir)
- // open database
- , tarcommon.opendb.bind(null, dblocation)
- // verify database entries are the same as the files
- , tarcommon.verify
- // clean up
- , rimraf.bind(null, tarcommon.dblocation)
- , rimraf.bind(null, dblocation)
- , rimraf.bind(null, tarcommon.datadir)
-], function (err) {
- if (err) console.error('Error', err)
- else console.log('No errors? All good then!')
+runTest(dbtar, function (err) {
+ if (err) throw err
+ console.log('No errors? All good then!')
console.log('***************************************************')
process.exit(err ? -1 : 0)
})
\ No newline at end of file
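Factoring the sequence into `runTest(dbtar, callback)` means the same extract / open / verify / clean-up pipeline can be reused against other database tarballs, e.g. (hypothetical tarball name):

    runTest(path.join(__dirname, 'some-other-test-data.db.tar'), function (err) {
      if (err) throw err
      console.log('compat test passed')
    })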
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-javascript/node-leveldown.git