diff --git a/.cirrus.yml b/.cirrus.yml deleted file mode 100644 index daf0cb9..0000000 --- a/.cirrus.yml +++ /dev/null @@ -1,13 +0,0 @@ -freebsd_instance: - image_family: freebsd-12-2 - -task: - name: FreeBSD - provision_script: - - pkg install -y git node npm - install_script: - - git submodule update --init --recursive - # TODO: fails with "env: npm: No such file or directory" - # - env ELECTRON_SKIP_BINARY_DOWNLOAD=1 npm i --unsafe-perm - # test_script: npm t - test_script: echo skipped diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 6f71068..24e1f78 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -10,6 +10,10 @@ updates: - dependency-name: node-gyp # ESM-only - dependency-name: tempy + + # Stay on the 3rd or 4th oldest stable release, per + # https://www.electronjs.org/docs/latest/tutorial/electron-timelines#version-support-policy + - dependency-name: electron - package-ecosystem: github-actions directory: / schedule: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1801772..af3f951 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -40,7 +40,7 @@ jobs: - name: Set up node uses: actions/setup-node@v3 with: - node-version: 14 + node-version: 16 architecture: ${{ matrix.arch }} # Temporary fix for https://github.com/nodejs/node-gyp/issues/2869 # TODO: instead bump node-gyp to >= 10.0.0 (drops Node.js < 16.14) diff --git a/.github/workflows/smoke.yml b/.github/workflows/smoke.yml index 7e3ed71..2755d2b 100644 --- a/.github/workflows/smoke.yml +++ b/.github/workflows/smoke.yml @@ -41,7 +41,7 @@ jobs: - name: Set up node uses: actions/setup-node@v3 with: - node-version: 14 + node-version: 16 architecture: ${{ matrix.arch }} # Temporary fix for https://github.com/nodejs/node-gyp/issues/2869 # TODO: instead bump node-gyp to >= 10.0.0 (drops Node.js < 16.14) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a001810..8493a0e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -8,7 +8,7 @@ jobs: matrix: # At the time of writing macos-latest is mac 10; we need 11 to build a universal binary. os: [ubuntu-latest, macos-11, windows-latest] - node: [12, 14, 16] + node: [16, 18, 20] arch: [x86, x64] exclude: - { os: ubuntu-latest, arch: x86 } @@ -42,7 +42,7 @@ jobs: with: file: coverage/lcov.info - name: Test Electron - if: ${{ matrix.node == '14' }} + if: ${{ matrix.node == '16' }} uses: GabrielBB/xvfb-action@v1 with: run: npm run test-electron diff --git a/.npmignore b/.npmignore index 92fd42a..56b963e 100644 --- a/.npmignore +++ b/.npmignore @@ -42,7 +42,6 @@ yarn.lock .gitmodules .github .travis.yml -.cirrus.yml appveyor.yml tsconfig.json diff --git a/README.md b/README.md index 3221bd6..d40612f 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,13 @@ # classic-level -**An [`abstract-level`](https://github.com/Level/abstract-level) database backed by [LevelDB](https://github.com/google/leveldb).** The successor to [`leveldown`](https://github.com/Level/leveldown) with builtin encodings, sublevels, events, promises and support of Uint8Array. If you are upgrading please see [`UPGRADING.md`](UPGRADING.md). +**An [`abstract-level`](https://github.com/Level/abstract-level) database backed by [LevelDB](https://github.com/google/leveldb).** The successor to [`leveldown`](https://github.com/Level/leveldown) with builtin encodings, sublevels, events, hooks and support of Uint8Array. If you are upgrading, please see [`UPGRADING.md`](UPGRADING.md). 
-> :pushpin: Which module should I use? What is `abstract-level`? Head over to the [FAQ](https://github.com/Level/community#faq). +> :pushpin: What is `abstract-level`? Head on over to [Frequently Asked Questions](https://github.com/Level/community#faq). [![level badge][level-badge]](https://github.com/Level/awesome) [![npm](https://img.shields.io/npm/v/classic-level.svg)](https://www.npmjs.com/package/classic-level) [![Node version](https://img.shields.io/node/v/classic-level.svg)](https://www.npmjs.com/package/classic-level) -[![Test](https://img.shields.io/github/workflow/status/Level/classic-level/Test?label=test)](https://github.com/Level/classic-level/actions/workflows/test.yml) +[![Test](https://img.shields.io/github/actions/workflow/status/Level/classic-level/test.yml?branch=main\&label=test)](https://github.com/Level/classic-level/actions/workflows/test.yml) [![Coverage](https://img.shields.io/codecov/c/github/Level/classic-level?label=\&logo=codecov\&logoColor=fff)](https://codecov.io/gh/Level/classic-level) [![Standard](https://img.shields.io/badge/standard-informational?logo=javascript\&logoColor=fff)](https://standardjs.com) [![Common Changelog](https://common-changelog.org/badge.svg)](https://common-changelog.org) @@ -21,51 +21,17 @@ - [Supported Platforms](#supported-platforms) - [API](#api) - [`db = new ClassicLevel(location[, options])`](#db--new-classiclevellocation-options) - - [`db.location`](#dblocation) - - [`db.status`](#dbstatus) - - [`db.open([options][, callback])`](#dbopenoptions-callback) - - [`db.close([callback])`](#dbclosecallback) - - [`db.supports`](#dbsupports) - - [`db.get(key[, options][, callback])`](#dbgetkey-options-callback) - - [`db.getMany(keys[, options][, callback])`](#dbgetmanykeys-options-callback) - - [`db.put(key, value[, options][, callback])`](#dbputkey-value-options-callback) - - [`db.del(key[, options][, callback])`](#dbdelkey-options-callback) - - [`db.batch(operations[, options][, callback])`](#dbbatchoperations-options-callback) - - [`chainedBatch = db.batch()`](#chainedbatch--dbbatch) - - [`iterator = db.iterator([options])`](#iterator--dbiteratoroptions) - - [About high water](#about-high-water) - - [`keyIterator = db.keys([options])`](#keyiterator--dbkeysoptions) - - [`valueIterator = db.values([options])`](#valueiterator--dbvaluesoptions) - - [`db.clear([options][, callback])`](#dbclearoptions-callback) - - [`sublevel = db.sublevel(name[, options])`](#sublevel--dbsublevelname-options) - - [`db.approximateSize(start, end[, options][, callback])`](#dbapproximatesizestart-end-options-callback) - - [`db.compactRange(start, end[, options][, callback])`](#dbcompactrangestart-end-options-callback) - - [`db.getProperty(property)`](#dbgetpropertyproperty) - - [`chainedBatch`](#chainedbatch) - - [`chainedBatch.put(key, value[, options])`](#chainedbatchputkey-value-options) - - [`chainedBatch.del(key[, options])`](#chainedbatchdelkey-options) - - [`chainedBatch.clear()`](#chainedbatchclear) - - [`chainedBatch.write([options][, callback])`](#chainedbatchwriteoptions-callback) - - [`chainedBatch.close([callback])`](#chainedbatchclosecallback) - - [`chainedBatch.length`](#chainedbatchlength) - - [`chainedBatch.db`](#chainedbatchdb) - - [`iterator`](#iterator) - - [`for await...of iterator`](#for-awaitof-iterator) - - [`iterator.next([callback])`](#iteratornextcallback) - - [`iterator.nextv(size[, options][, callback])`](#iteratornextvsize-options-callback) - - [`iterator.all([options][, callback])`](#iteratoralloptions-callback) - - 
[`iterator.seek(target[, options])`](#iteratorseektarget-options) - - [`iterator.close([callback])`](#iteratorclosecallback) - - [`iterator.db`](#iteratordb) - - [`iterator.count`](#iteratorcount) - - [`iterator.limit`](#iteratorlimit) - - [`keyIterator`](#keyiterator) - - [`valueIterator`](#valueiterator) - - [`sublevel`](#sublevel) - - [`sublevel.prefix`](#sublevelprefix) - - [`sublevel.db`](#subleveldb) - - [`ClassicLevel.destroy(location[, callback])`](#classicleveldestroylocation-callback) - - [`ClassicLevel.repair(location[, callback])`](#classiclevelrepairlocation-callback) + - [Opening](#opening) + - [Closing](#closing) + - [Reading](#reading) + - [Writing](#writing) + - [Additional Methods](#additional-methods) + - [`db.location`](#dblocation) + - [`db.approximateSize(start, end[, options])`](#dbapproximatesizestart-end-options) + - [`db.compactRange(start, end[, options])`](#dbcompactrangestart-end-options) + - [`db.getProperty(property)`](#dbgetpropertyproperty) + - [`ClassicLevel.destroy(location)`](#classicleveldestroylocation) + - [`ClassicLevel.repair(location)`](#classiclevelrepairlocation) - [Development](#development) - [Getting Started](#getting-started) - [Contributing](#contributing) @@ -98,23 +64,6 @@ for await (const [key, value] of db.iterator({ gt: 'a' })) { } ``` -All asynchronous methods also support callbacks. - -
Callback example - -```js -db.put('example', { hello: 'world' }, (err) => { - if (err) throw err - - db.get('example', (err, value) => { - if (err) throw err - console.log(value) // { hello: 'world' } - }) -}) -``` - -
- Usage from TypeScript requires generic type parameters.
TypeScript example

@@ -142,68 +91,41 @@ const xyz = db.sublevel('xyz', { valueEncoding: 'json' })

 ## Supported Platforms

-We aim to support _at least_ Active LTS and Current Node.js releases, Electron 5.0.0, as well as any future Node.js and Electron releases thanks to [Node-API](https://nodejs.org/api/n-api.html).
+We aim to support _at least_ Active LTS and Current Node.js releases, Electron >= 18, as well as any future Node.js and Electron releases thanks to [Node-API](https://nodejs.org/api/n-api.html).

-The `classic-level` npm package ships with prebuilt binaries for popular 64-bit platforms as well as ARM, M1, Android, Alpine (musl), Windows 32-bit, Linux flavors with an old glibc (Debian 8, Ubuntu 14.04, RHEL 7, CentOS 7) and is known to work on:
+The `classic-level` npm package ships with prebuilt binaries for popular 64-bit platforms as well as ARM, M1, Android (built against Node.js core rather than the [`nodejs-mobile`](https://github.com/JaneaSystems/nodejs-mobile) fork), Alpine (musl), Windows 32-bit, Linux flavors with an old glibc (Debian 8, Ubuntu 14.04, RHEL 7, CentOS 7) and is known to work on:

 - **Linux**, including ARM platforms such as Raspberry Pi and Kindle
 - **Mac OS** (10.7 and later)
-- **Windows**
-- **FreeBSD**
+- **Windows**.

 When installing `classic-level`, [`node-gyp-build`](https://github.com/prebuild/node-gyp-build) will check if a compatible binary exists and fall back to compiling from source if it doesn't. In that case you'll need a [valid `node-gyp` installation](https://github.com/nodejs/node-gyp#installation).

-If you don't want to use the prebuilt binary for the platform you are installing on, specify the `--build-from-source` flag when you install. One of:
+If you don't want to use the prebuilt binary for the platform you are installing on, specify the `--build-from-source` flag when you install:

 ```
-npm install --build-from-source
 npm install classic-level --build-from-source
 ```

 If you are working on `classic-level` itself and want to recompile the C++ code, run `npm run rebuild`.

-Note: the Android prebuilds are made for and built against Node.js core rather than the [`nodejs-mobile`](https://github.com/JaneaSystems/nodejs-mobile) fork.

 ## API

-The API of `classic-level` follows that of [`abstract-level`](https://github.com/Level/abstract-level) with a few additional options and methods specific to LevelDB. The documentation below covers it all except for [Encodings](https://github.com/Level/abstract-level#encodings), [Events](https://github.com/Level/abstract-level#events) and [Errors](https://github.com/Level/abstract-level#errors) which are exclusively documented in `abstract-level`.
-
-An `abstract-level` and thus `classic-level` database is at its core a [key-value database](https://en.wikipedia.org/wiki/Key%E2%80%93value_database). A key-value pair is referred to as an _entry_ here and typically returned as an array, comparable to [`Object.entries()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/entries).
+The API of `classic-level` follows that of [`abstract-level`](https://github.com/Level/abstract-level#public-api-for-consumers) with a few additional methods and options that are specific to LevelDB. The documentation below only covers the differences.

 ### `db = new ClassicLevel(location[, options])`

-Create a database or open an existing database. The `location` argument must be a directory path (relative or absolute) where LevelDB will store its files.
If the directory does not yet exist (and `options.createIfMissing` is true) it will be created recursively. The optional `options` object may contain: - -- `keyEncoding` (string or object, default `'utf8'`): encoding to use for keys -- `valueEncoding` (string or object, default `'utf8'`): encoding to use for values. - -See [Encodings](https://github.com/Level/abstract-level#encodings) for a full description of these options. Other `options` (except `passive`) are forwarded to `db.open()` which is automatically called in a next tick after the constructor returns. Any read & write operations are queued internally until the database has finished opening. If opening fails, those queued operations will yield errors. +Create a database or open an existing database. The `location` argument must be a directory path (relative or absolute) where LevelDB will store its files. If the directory does not yet exist (and `options.createIfMissing` is true) it will be created recursively. Options are the same as in `abstract-level` except for the additional options accepted by `db.open()` and thus by this constructor. -### `db.location` +A `classic-level` database obtains an exclusive lock. If another process or instance has already opened the underlying LevelDB store at the same `location` then opening will fail with error code [`LEVEL_LOCKED`](https://github.com/Level/abstract-level#errors). -Read-only getter that returns the `location` string that was passed to the constructor (as-is). - -### `db.status` - -Read-only getter that returns a string reflecting the current state of the database: - -- `'opening'` - waiting for the database to be opened -- `'open'` - successfully opened the database -- `'closing'` - waiting for the database to be closed -- `'closed'` - successfully closed the database. +### Opening -### `db.open([options][, callback])` +The [`db.open([options])`](https://github.com/Level/abstract-level#dbopenoptions) method has additional options: -Open the database. The `callback` function will be called with no arguments when successfully opened, or with a single error argument if opening failed. The database has an exclusive lock (on disk): if another process or instance has already opened the underlying LevelDB store at the given `location` then opening will fail with error code [`LEVEL_LOCKED`](https://github.com/Level/abstract-level#errors). If no callback is provided, a promise is returned. Options passed to `open()` take precedence over options passed to the database constructor. +- `multithreading` (boolean, default: `false`): allow multiple threads to access the database. This is only relevant when using [worker threads](https://nodejs.org/api/worker_threads.html). -The optional `options` object may contain: - -- `createIfMissing` (boolean, default: `true`): If `true`, create an empty database if one doesn't already exist. If `false` and the database doesn't exist, opening will fail. -- `errorIfExists` (boolean, default: `false`): If `true` and the database already exists, opening will fail. -- `passive` (boolean, default: `false`): Wait for, but do not initiate, opening of the database. -- `multithreading` (boolean, default: `false`): Allow multiple threads to access the database. This is only relevant when using [worker threads](https://nodejs.org/api/worker_threads.html) - -For advanced performance tuning, the `options` object may also contain the following. Modify these options only if you can prove actual benefit for your particular application. 
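+For illustration, a minimal sketch that calls `db.open()` explicitly to surface the lock error (reusing the `./db` location from the examples above):
+
+```js
+const db = new ClassicLevel('./db')
+
+try {
+  await db.open()
+} catch (err) {
+  if (err.code === 'LEVEL_LOCKED') {
+    // Another process or instance has opened this location
+  }
+}
+```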
+It also has the following options for advanced performance tuning, only to be modified if you can prove actual benefit for your particular application.
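+For example, one of those options might be passed like so (a sketch: `cacheSize` is among the options listed below and the value shown is arbitrary):
+
+```js
+const db = new ClassicLevel('./db', { cacheSize: 16 * 1024 * 1024 })
+```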
Click to expand @@ -228,160 +150,29 @@ For advanced performance tuning, the `options` object may also contain the follo
-It's generally not necessary to call `open()` because it's automatically called by the database constructor. It may however be useful to capture an error from failure to open, that would otherwise not surface until another method like `db.get()` is called. It's also possible to reopen the database after it has been closed with [`close()`](#dbclosecallback). Once `open()` has then been called, any read & write operations will again be queued internally until opening has finished. - -The `open()` and `close()` methods are idempotent. If the database is already open, the `callback` will be called in a next tick. If opening is already in progress, the `callback` will be called when that has finished. If closing is in progress, the database will be reopened once closing has finished. Likewise, if `close()` is called after `open()`, the database will be closed once opening has finished and the prior `open()` call will receive an error. - -### `db.close([callback])` - -Close the database. The `callback` function will be called with no arguments if closing succeeded or with a single `error` argument if closing failed. If no callback is provided, a promise is returned. - -A database has associated resources like file handles and locks. When the database is no longer needed (for the remainder of a program) it's recommended to call `db.close()` to free up resources. The underlying LevelDB store cannot be opened by multiple `classic-level` instances or processes simultaneously. - -After `db.close()` has been called, no further read & write operations are allowed unless and until `db.open()` is called again. For example, `db.get(key)` will yield an error with code [`LEVEL_DATABASE_NOT_OPEN`](https://github.com/Level/abstract-level#errors). Any unclosed iterators or chained batches will be closed by `db.close()` and can then no longer be used even when `db.open()` is called again. - -A `classic-level` database waits for any pending operations to finish before closing. For example: - -```js -db.put('key', 'value', function (err) { - // This happens first -}) - -db.close(function (err) { - // This happens second -}) -``` - -### `db.supports` - -A [manifest](https://github.com/Level/supports) describing the features supported by this database. Might be used like so: - -```js -if (!db.supports.permanence) { - throw new Error('Persistent storage is required') -} -``` - -### `db.get(key[, options][, callback])` - -Get a value from the database by `key`. The optional `options` object may contain: - -- `keyEncoding`: custom key encoding for this operation, used to encode the `key`. -- `valueEncoding`: custom value encoding for this operation, used to decode the value. -- `fillCache` (boolean, default: `true`): Unless set to `false`, LevelDB will fill its in-memory [LRU](http://en.wikipedia.org/wiki/Least_Recently_Used) cache with data that was read. - -The `callback` function will be called with an error if the operation failed. If the key was not found, the error will have code [`LEVEL_NOT_FOUND`](https://github.com/Level/abstract-level#errors). If successful the first argument will be `null` and the second argument will be the value. If no callback is provided, a promise is returned. - -A `classic-level` database supports snapshots (as indicated by `db.supports.snapshots`) which means `db.get()` _should_ read from a snapshot of the database, created at the time `db.get()` was called. This means it should not see the data of simultaneous write operations. 
However, there's currently a small delay before the snapshot is created. - -### `db.getMany(keys[, options][, callback])` - -Get multiple values from the database by an array of `keys`. The optional `options` object may contain: - -- `keyEncoding`: custom key encoding for this operation, used to encode the `keys`. -- `valueEncoding`: custom value encoding for this operation, used to decode values. -- `fillCache`: same as described for [`db.get()`](#dbgetkey-options-callback). - -The `callback` function will be called with an error if the operation failed. If successful the first argument will be `null` and the second argument will be an array of values with the same order as `keys`. If a key was not found, the relevant value will be `undefined`. If no callback is provided, a promise is returned. - -A `classic-level` database supports snapshots (as indicated by `db.supports.snapshots`) which means `db.getMany()` _should_ read from a snapshot of the database, created at the time `db.getMany()` was called. This means it should not see the data of simultaneous write operations. However, there's currently a small delay before the snapshot is created. - -### `db.put(key, value[, options][, callback])` - -Add a new entry or overwrite an existing entry. The optional `options` object may contain: - -- `keyEncoding`: custom key encoding for this operation, used to encode the `key`. -- `valueEncoding`: custom value encoding for this operation, used to encode the `value`. -- `sync` (boolean, default: `false`): if set to `true`, LevelDB will perform a synchronous write of the data although the operation will be asynchronous as far as Node.js or Electron is concerned. Normally, LevelDB passes the data to the operating system for writing and returns immediately. In contrast, a synchronous write will use [`fsync()`](https://man7.org/linux/man-pages/man2/fsync.2.html) or equivalent, so the `put()` call will not complete until the data is actually on disk. Synchronous writes are significantly slower than asynchronous writes. - -The `callback` function will be called with no arguments if the operation was successful or with an error if it failed. If no callback is provided, a promise is returned. +### Closing -### `db.del(key[, options][, callback])` - -Delete an entry by `key`. The optional `options` object may contain: - -- `keyEncoding`: custom key encoding for this operation, used to encode the `key`. -- `sync` (boolean, default: `false`): same as described for [`db.put()`](#dbputkey-value-options-callback) - -The `callback` function will be called with no arguments if the operation was successful or with an error if it failed. If no callback is provided, a promise is returned. - -### `db.batch(operations[, options][, callback])` - -Perform multiple _put_ and/or _del_ operations in bulk. The `operations` argument must be an array containing a list of operations to be executed sequentially, although as a whole they are performed as an atomic operation. - -Each operation must be an object with at least a `type` property set to either `'put'` or `'del'`. If the `type` is `'put'`, the operation must have `key` and `value` properties. It may optionally have `keyEncoding` and / or `valueEncoding` properties to encode keys or values with a custom encoding for just that operation. If the `type` is `'del'`, the operation must have a `key` property and may optionally have a `keyEncoding` property. 
- -An operation of either type may also have a `sublevel` property, to prefix the key of the operation with the prefix of that sublevel. This allows atomically committing data to multiple sublevels. Keys and values will be encoded by the sublevel, to the same effect as a `sublevel.batch(..)` call. In the following example, the first `value` will be encoded with `'json'` rather than the default encoding of `db`: - -```js -const people = db.sublevel('people', { valueEncoding: 'json' }) -const nameIndex = db.sublevel('names') - -await db.batch([{ - type: 'put', - sublevel: people, - key: '123', - value: { - name: 'Alice' - } -}, { - type: 'put', - sublevel: nameIndex, - key: 'Alice', - value: '123' -}]) -``` - -The optional `options` object may contain: - -- `keyEncoding`: custom key encoding for this batch, used to encode keys. -- `valueEncoding`: custom value encoding for this batch, used to encode values. -- `sync` (boolean, default: `false`): same as described for [`db.put()`](#dbputkey-value-options-callback). - -Encoding properties on individual operations take precedence. In the following example, the first value will be encoded with the `'utf8'` encoding and the second with `'json'`. +The [`db.close()`](https://github.com/Level/abstract-level#dbclose) method has an additional behavior: it waits for any pending operations to finish before closing. For example: ```js -await db.batch([ - { type: 'put', key: 'a', value: 'foo' }, - { type: 'put', key: 'b', value: 123, valueEncoding: 'json' } -], { valueEncoding: 'utf8' }) +// close() will wait for the put() to finish. +const promise1 = db.put('key', 'value') +const promise2 = db.close() ``` -The `callback` function will be called with no arguments if the batch was successful or with an error if it failed. If no callback is provided, a promise is returned. +### Reading -### `chainedBatch = db.batch()` +The [`db.get(key[, options])`](https://github.com/Level/abstract-level#dbgetkey-options), [`db.getMany(keys[, options])`](https://github.com/Level/abstract-level#dbgetmanykeys-options) and [`db.iterator([options])`](https://github.com/Level/abstract-level#iterator--dbiteratoroptions) methods have an additional option: -Create a [`chained batch`](#chainedbatch), when `batch()` is called with zero arguments. A chained batch can be used to build and eventually commit an atomic batch of operations. Depending on how it's used, it is possible to obtain greater performance with this form of `batch()`. +- `fillCache` (boolean, default: `true`): unless set to `false`, LevelDB will fill its in-memory [LRU](http://en.wikipedia.org/wiki/Least_Recently_Used) cache with data that was read. -```js -await db.batch() - .del('bob') - .put('alice', 361) - .put('kim', 220) - .write() -``` - -### `iterator = db.iterator([options])` - -Create an [`iterator`](#iterator). The optional `options` object may contain the following _range options_ to control the range of entries to be iterated: +A `classic-level` database supports snapshots (as indicated by [`db.supports.snapshots`](https://github.com/Level/supports#snapshots-boolean)) which means `db.get()`, `db.getMany()` and `db.iterator()` read from a snapshot of the database, created synchronously at the time that `db.get()`, `db.getMany()` or `db.iterator()` was called. This means they will not see the data of simultaneous write operations, commonly referred to as having _snapshot guarantees_. -- `gt` (greater than) or `gte` (greater than or equal): define the lower bound of the range to be iterated. 
Only entries where the key is greater than (or equal to) this option will be included in the range. When `reverse` is true the order will be reversed, but the entries iterated will be the same. -- `lt` (less than) or `lte` (less than or equal): define the higher bound of the range to be iterated. Only entries where the key is less than (or equal to) this option will be included in the range. When `reverse` is true the order will be reversed, but the entries iterated will be the same. -- `reverse` (boolean, default: `false`): iterate entries in reverse order. Beware that a reverse seek can be slower than a forward seek. -- `limit` (number, default: `Infinity`): limit the number of entries yielded. This number represents a _maximum_ number of entries and will not be reached if the end of the range is reached first. A value of `Infinity` or `-1` means there is no limit. When `reverse` is true the entries with the highest keys will be returned instead of the lowest keys. +The [`db.iterator([options])`](https://github.com/Level/abstract-level#iterator--dbiteratoroptions) method also accepts: -The `gte` and `lte` range options take precedence over `gt` and `lt` respectively. If no range options are provided, the iterator will visit all entries of the database, starting at the lowest key and ending at the highest key (unless `reverse` is true). In addition to range options, the `options` object may contain: +- `highWaterMarkBytes` (number, default: `16 * 1024`): limit the amount of data that the iterator will hold in memory. -- `keys` (boolean, default: `true`): whether to return the key of each entry. If set to `false`, the iterator will yield keys that are `undefined`. Prefer to use `db.keys()` instead. -- `values` (boolean, default: `true`): whether to return the value of each entry. If set to `false`, the iterator will yield values that are `undefined`. Prefer to use `db.values()` instead. -- `keyEncoding`: custom key encoding for this iterator, used to encode range options, to encode `seek()` targets and to decode keys. -- `valueEncoding`: custom value encoding for this iterator, used to decode values. -- `fillCache` (boolean, default: `false`): if set to `true`, LevelDB will fill its in-memory [LRU](http://en.wikipedia.org/wiki/Least_Recently_Used) cache with data that was read. -- `highWaterMarkBytes` (number, default: `16 * 1024`): limit the amount of data that the iterator will hold in memory. Explained below. - -#### About high water - -While [`iterator.nextv(size)`](#iteratornextvsize-options-callback) is reading entries from LevelDB into memory, it sums up the byte length of those entries. If and when that sum has exceeded `highWaterMarkBytes`, reading will stop. If `nextv(2)` would normally yield two entries but the first entry is too large, then only one entry will be yielded. More `nextv(size)` calls must then be made to get the remaining entries. +While [`iterator.nextv(size)`](https://github.com/Level/abstract-level#iteratornextvsize-options) is reading entries from LevelDB into memory, it sums up the byte length of those entries. If and when that sum has exceeded `highWaterMarkBytes`, reading will stop. If `nextv(2)` would normally yield two entries but the first entry is too large, then only one entry will be yielded. More `nextv(size)` calls must then be made to get the remaining entries. If memory usage is less of a concern, increasing `highWaterMarkBytes` can increase the throughput of `nextv(size)`. 
If set to `0` then `nextv(size)` will never yield more than one entry, as `highWaterMarkBytes` will be exceeded on each call. It cannot be set to `Infinity`. On key- and value iterators (created with `db.keys()` and `db.values()`) it applies to the byte length of keys or values respectively, rather than the combined byte length of keys _and_ values.

@@ -399,113 +190,39 @@ const stream = new EntryStream(db, {

 Side note: the "watermark" analogy makes more sense in Node.js streams because its internal `highWaterMark` can grow, indicating the highest that the "water" has been. In a `classic-level` iterator however, `highWaterMarkBytes` is fixed once set. Getting exceeded does not change it.

-The `highWaterMarkBytes` option is also applied to an internal cache that `classic-level` employs for [`next()`](#iteratornextcallback) and [`for await...of`](#for-awaitof-iterator). When `next()` is called, that cache is populated with at most 1000 entries, or less than that if `highWaterMarkBytes` is exceeded by the total byte length of entries. To avoid reading too eagerly, the cache is not populated on the first `next()` call, or the first `next()` call after a `seek()`. Only on subsequent `next()` calls.
+The `highWaterMarkBytes` option is also applied to an internal cache that `classic-level` employs for [`next()`](https://github.com/Level/abstract-level#iteratornext) and [`for await...of`](https://github.com/Level/abstract-level#for-awaitof-iterator). When `next()` is called, that cache is populated with at most 1000 entries, or less than that if `highWaterMarkBytes` is exceeded by the total byte length of entries. To avoid reading too eagerly, the cache is not populated on the first `next()` call, or the first `next()` call after a `seek()`. Only on subsequent `next()` calls.

-### `keyIterator = db.keys([options])`
+### Writing

-Create a [key iterator](#keyiterator), having the same interface as `db.iterator()` except that it yields keys instead of entries. If only keys are needed, using `db.keys()` may increase performance because values won't have to fetched, copied or decoded. Options are the same as for `db.iterator()` except that `db.keys()` does not take `keys`, `values` and `valueEncoding` options.
+The [`db.put(key, value[, options])`](https://github.com/Level/abstract-level#dbputkey-value-options), [`db.del(key[, options])`](https://github.com/Level/abstract-level#dbdelkey-options), [`db.batch(operations[, options])`](https://github.com/Level/abstract-level#dbbatchoperations-options) and [`chainedBatch.write([options])`](https://github.com/Level/abstract-level#chainedbatchwriteoptions) methods have an additional option:

-```js
-// Iterate lazily
-for await (const key of db.keys({ gt: 'a' })) {
-  console.log(key)
-}
-
-// Get all at once. Setting a limit is recommended.
-const keys = await db.keys({ gt: 'a', limit: 10 }).all()
-```
+- `sync` (boolean, default: `false`): if set to `true`, LevelDB will perform a synchronous write of the data although the operation will be asynchronous as far as Node.js or Electron is concerned. Normally, LevelDB passes the data to the operating system for writing and returns immediately. In contrast, a synchronous write will use [`fsync()`](https://man7.org/linux/man-pages/man2/fsync.2.html) or equivalent, so the write will not complete until the data is actually on disk. Synchronous writes are significantly slower than asynchronous writes.
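+For illustration, a minimal sketch of the trade-off (assuming a `db` opened as elsewhere in this README):
+
+```js
+// Fast: resolves once the write has been handed to the operating system
+await db.put('a', '1')
+
+// Durable: resolves only after an fsync() or equivalent has completed
+await db.put('b', '2', { sync: true })
+```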
-### `valueIterator = db.values([options])` - -Create a [value iterator](#valueiterator), having the same interface as `db.iterator()` except that it yields values instead of entries. If only values are needed, using `db.values()` may increase performance because keys won't have to fetched, copied or decoded. Options are the same as for `db.iterator()` except that `db.values()` does not take `keys` and `values` options. Note that it _does_ take a `keyEncoding` option, relevant for the encoding of range options. - -```js -// Iterate lazily -for await (const value of db.values({ gt: 'a' })) { - console.log(value) -} - -// Get all at once. Setting a limit is recommended. -const values = await db.values({ gt: 'a', limit: 10 }).all() -``` - -### `db.clear([options][, callback])` - -Delete all entries or a range. Not guaranteed to be atomic. Accepts the following options (with the same rules as on iterators): - -- `gt` (greater than) or `gte` (greater than or equal): define the lower bound of the range to be deleted. Only entries where the key is greater than (or equal to) this option will be included in the range. When `reverse` is true the order will be reversed, but the entries deleted will be the same. -- `lt` (less than) or `lte` (less than or equal): define the higher bound of the range to be deleted. Only entries where the key is less than (or equal to) this option will be included in the range. When `reverse` is true the order will be reversed, but the entries deleted will be the same. -- `reverse` (boolean, default: `false`): delete entries in reverse order. Only effective in combination with `limit`, to delete the last N entries. -- `limit` (number, default: `Infinity`): limit the number of entries to be deleted. This number represents a _maximum_ number of entries and will not be reached if the end of the range is reached first. A value of `Infinity` or `-1` means there is no limit. When `reverse` is true the entries with the highest keys will be deleted instead of the lowest keys. -- `keyEncoding`: custom key encoding for this operation, used to encode range options. - -The `gte` and `lte` range options take precedence over `gt` and `lt` respectively. If no options are provided, all entries will be deleted. The `callback` function will be called with no arguments if the operation was successful or with an error if it failed. If no callback is provided, a promise is returned. - -### `sublevel = db.sublevel(name[, options])` - -Create a [sublevel](#sublevel) that has the same interface as `db` (except for additional `classic-level` methods like `db.approximateSize()`) and prefixes the keys of operations before passing them on to `db`. The `name` argument is required and must be a string. - -```js -const example = db.sublevel('example') - -await example.put('hello', 'world') -await db.put('a', '1') - -// Prints ['hello', 'world'] -for await (const [key, value] of example.iterator()) { - console.log([key, value]) -} -``` - -Sublevels effectively separate a database into sections. Think SQL tables, but evented, ranged and realtime! Each sublevel is an `AbstractLevel` instance with its own keyspace, [events](https://github.com/Level/abstract-level#events) and [encodings](https://github.com/Level/abstract-level#encodings). For example, it's possible to have one sublevel with `'buffer'` keys and another with `'utf8'` keys. The same goes for values. 
Like so: - -```js -db.sublevel('one', { valueEncoding: 'json' }) -db.sublevel('two', { keyEncoding: 'buffer' }) -``` - -An own keyspace means that `sublevel.iterator()` only includes entries of that sublevel, `sublevel.clear()` will only delete entries of that sublevel, and so forth. Range options get prefixed too. - -Fully qualified keys (as seen from the parent database) take the form of `prefix + key` where `prefix` is `separator + name + separator`. If `name` is empty, the effective prefix is two separators. Sublevels can be nested: if `db` is itself a sublevel then the effective prefix is a combined prefix, e.g. `'!one!!two!'`. Note that a parent database will see its own keys as well as keys of any nested sublevels: - -```js -// Prints ['!example!hello', 'world'] and ['a', '1'] -for await (const [key, value] of db.iterator()) { - console.log([key, value]) -} -``` +### Additional Methods -> :pushpin: The key structure is equal to that of [`subleveldown`](https://github.com/Level/subleveldown) which offered sublevels before they were built-in to `abstract-level` and thus `classic-level`. This means that an `classic-level` sublevel can read sublevels previously created with (and populated by) `subleveldown`. +The following methods and properties are not part of the [`abstract-level`](https://github.com/Level/abstract-level) interface. -Internally, sublevels operate on keys that are either a string, Buffer or Uint8Array, depending on choice of encoding. Which is to say: binary keys are fully supported. The `name` must however always be a string and can only contain ASCII characters. +#### `db.location` -The optional `options` object may contain: - -- `separator` (string, default: `'!'`): Character for separating sublevel names from user keys and each other. Must sort before characters used in `name`. An error will be thrown if that's not the case. -- `keyEncoding` (string or object, default `'utf8'`): encoding to use for keys -- `valueEncoding` (string or object, default `'utf8'`): encoding to use for values. - -The `keyEncoding` and `valueEncoding` options are forwarded to the `AbstractLevel` constructor and work the same, as if a new, separate database was created. They default to `'utf8'` regardless of the encodings configured on `db`. Other options are forwarded too but `classic-level` has no relevant options at the time of writing. For example, setting the `createIfMissing` option will have no effect. Why is that? - -Like regular databases, sublevels open themselves but they do not affect the state of the parent database. This means a sublevel can be individually closed and (re)opened. If the sublevel is created while the parent database is opening, it will wait for that to finish. If the parent database is closed, then opening the sublevel will fail and subsequent operations on the sublevel will yield errors with code [`LEVEL_DATABASE_NOT_OPEN`](https://github.com/Level/abstract-level#errors). +Read-only getter that returns the `location` string that was passed to the constructor (as-is). -### `db.approximateSize(start, end[, options][, callback])` +#### `db.approximateSize(start, end[, options])` Get the approximate number of bytes of file system space used by the range `[start..end)`. The result might not include recently written data. The optional `options` object may contain: - `keyEncoding`: custom key encoding for this operation, used to encode `start` and `end`. -The `callback` function will be called with a single error argument if the operation failed. 
If successful the first argument will be `null` and the second argument will be the approximate size as a number. If no callback is provided, a promise is returned. This method is an additional method that is not part of the [`abstract-level`](https://github.com/Level/abstract-level) interface. +Returns a promise for a number. -### `db.compactRange(start, end[, options][, callback])` +#### `db.compactRange(start, end[, options])` Manually trigger a database compaction in the range `[start..end]`. The optional `options` object may contain: - `keyEncoding`: custom key encoding for this operation, used to encode `start` and `end`. -The `callback` function will be called with no arguments if the operation was successful or with an error if it failed. If no callback is provided, a promise is returned. This method is an additional method that is not part of the [`abstract-level`](https://github.com/Level/abstract-level) interface. +Returns a promise. -### `db.getProperty(property)` +#### `db.getProperty(property)` Get internal details from LevelDB. When issued with a valid `property` string, a string value is returned synchronously. Valid properties are: @@ -513,215 +230,11 @@ Get internal details from LevelDB. When issued with a valid `property` string, a - `leveldb.stats`: returns a multi-line string describing statistics about LevelDB's internal operation. - `leveldb.sstables`: returns a multi-line string describing all of the _sstables_ that make up contents of the current database. -This method is an additional method that is not part of the [`abstract-level`](https://github.com/Level/abstract-level) interface. - -### `chainedBatch` - -#### `chainedBatch.put(key, value[, options])` - -Queue a `put` operation on this batch, not committed until `write()` is called. This will throw a [`LEVEL_INVALID_KEY`](https://github.com/Level/abstract-level#errors) or [`LEVEL_INVALID_VALUE`](https://github.com/Level/abstract-level#errors) error if `key` or `value` is invalid. The optional `options` object may contain: - -- `keyEncoding`: custom key encoding for this operation, used to encode the `key`. -- `valueEncoding`: custom value encoding for this operation, used to encode the `value`. -- `sublevel` (sublevel instance): act as though the `put` operation is performed on the given sublevel, to similar effect as `sublevel.batch().put(key, value)`. This allows atomically committing data to multiple sublevels. The `key` will be prefixed with the `prefix` of the sublevel, and the `key` and `value` will be encoded by the sublevel (using the default encodings of the sublevel unless `keyEncoding` and / or `valueEncoding` are provided). - -#### `chainedBatch.del(key[, options])` - -Queue a `del` operation on this batch, not committed until `write()` is called. This will throw a [`LEVEL_INVALID_KEY`](https://github.com/Level/abstract-level#errors) error if `key` is invalid. The optional `options` object may contain: - -- `keyEncoding`: custom key encoding for this operation, used to encode the `key`. -- `sublevel` (sublevel instance): act as though the `del` operation is performed on the given sublevel, to similar effect as `sublevel.batch().del(key)`. This allows atomically committing data to multiple sublevels. The `key` will be prefixed with the `prefix` of the sublevel, and the `key` will be encoded by the sublevel (using the default key encoding of the sublevel unless `keyEncoding` is provided). - -#### `chainedBatch.clear()` - -Clear all queued operations on this batch. 
- -#### `chainedBatch.write([options][, callback])` - -Commit the queued operations for this batch. All operations will be written atomically, that is, they will either all succeed or fail with no partial commits. - -The optional `options` object may contain: - -- `sync` (boolean, default: `false`): same as described for [`db.put()`](#dbputkey-value-options-callback). - -Note that `write()` does not take encoding options. Those can only be set on `put()` and `del()` because `classic-level` synchronously forwards such calls to LevelDB and thus need keys and values to be encoded at that point. - -The `callback` function will be called with no arguments if the batch was successful or with an error if it failed. If no callback is provided, a promise is returned. - -After `write()` or `close()` has been called, no further operations are allowed. - -#### `chainedBatch.close([callback])` - -Free up underlying resources. This should be done even if the chained batch has zero queued operations. Automatically called by `write()` so normally not necessary to call, unless the intent is to discard a chained batch without committing it. The `callback` function will be called with no arguments. If no callback is provided, a promise is returned. Closing the batch is an idempotent operation, such that calling `close()` more than once is allowed and makes no difference. - -#### `chainedBatch.length` - -The number of queued operations on the current batch. - -#### `chainedBatch.db` - -A reference to the database that created this chained batch. - -### `iterator` - -An iterator allows one to lazily read a range of entries stored in the database. The entries will be sorted by keys in [lexicographic order](https://en.wikipedia.org/wiki/Lexicographic_order) (in other words: byte order) which in short means key `'a'` comes before `'b'` and key `'10'` comes before `'2'`. - -An iterator reads from a snapshot of the database, created at the time `db.iterator()` was called. This means the iterator will not see the data of simultaneous write operations. - -Iterators can be consumed with [`for await...of`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for-await...of) and `iterator.all()`, or by manually calling `iterator.next()` or `nextv()` in succession. In the latter case, `iterator.close()` must always be called. In contrast, finishing, throwing, breaking or returning from a `for await...of` loop automatically calls `iterator.close()`, as does `iterator.all()`. - -An iterator reaches its natural end in the following situations: - -- The end of the database has been reached -- The end of the range has been reached -- The last `iterator.seek()` was out of range. - -An iterator keeps track of calls that are in progress. It doesn't allow concurrent `next()`, `nextv()` or `all()` calls (including a combination thereof) and will throw an error with code [`LEVEL_ITERATOR_BUSY`](https://github.com/Level/abstract-level#errors) if that happens: - -```js -// Not awaited and no callback provided -iterator.next() - -try { - // Which means next() is still in progress here - iterator.all() -} catch (err) { - console.log(err.code) // 'LEVEL_ITERATOR_BUSY' -} -``` - -#### `for await...of iterator` - -Yields entries, which are arrays containing a `key` and `value`. The type of `key` and `value` depends on the options passed to `db.iterator()`. 
- -```js -try { - for await (const [key, value] of db.iterator()) { - console.log(key) - } -} catch (err) { - console.error(err) -} -``` - -#### `iterator.next([callback])` - -Advance to the next entry and yield that entry. If an error occurs, the `callback` function will be called with an error. Otherwise, the `callback` receives `null`, a `key` and a `value`. The type of `key` and `value` depends on the options passed to `db.iterator()`. If the iterator has reached its natural end, both `key` and `value` will be `undefined`. - -If no callback is provided, a promise is returned for either an array (containing a `key` and `value`) or `undefined` if the iterator reached its natural end. - -**Note:** `iterator.close()` must always be called once there's no intention to call `next()` or `nextv()` again. Even if such calls yielded an error and even if the iterator reached its natural end. Not closing the iterator will result in memory leaks and may also affect performance of other operations if many iterators are unclosed and each is holding a snapshot of the database. - -#### `iterator.nextv(size[, options][, callback])` - -Advance repeatedly and get at most `size` amount of entries in a single call. Can be faster than repeated `next()` calls. The `size` argument must be an integer and has a soft minimum of 1. There are no `options` currently. - -If an error occurs, the `callback` function will be called with an error. Otherwise, the `callback` receives `null` and an array of entries, where each entry is an array containing a key and value. The natural end of the iterator will be signaled by yielding an empty array. If no callback is provided, a promise is returned. - -```js -const iterator = db.iterator() - -while (true) { - const entries = await iterator.nextv(100) - - if (entries.length === 0) { - break - } - - for (const [key, value] of entries) { - // .. - } -} - -await iterator.close() -``` - -#### `iterator.all([options][, callback])` - -Advance repeatedly and get all (remaining) entries as an array, automatically closing the iterator. Assumes that those entries fit in memory. If that's not the case, instead use `next()`, `nextv()` or `for await...of`. There are no `options` currently. If an error occurs, the `callback` function will be called with an error. Otherwise, the `callback` receives `null` and an array of entries, where each entry is an array containing a key and value. If no callback is provided, a promise is returned. - -```js -const entries = await db.iterator({ limit: 100 }).all() - -for (const [key, value] of entries) { - // .. -} -``` - -#### `iterator.seek(target[, options])` - -Seek to the key closest to `target`. Subsequent calls to `iterator.next()`, `nextv()` or `all()` (including implicit calls in a `for await...of` loop) will yield entries with keys equal to or larger than `target`, or equal to or smaller than `target` if the `reverse` option passed to `db.iterator()` was true. - -The optional `options` object may contain: - -- `keyEncoding`: custom key encoding, used to encode the `target`. By default the `keyEncoding` option of the iterator is used or (if that wasn't set) the `keyEncoding` of the database. - -If range options like `gt` were passed to `db.iterator()` and `target` does not fall within that range, the iterator will reach its natural end. - -#### `iterator.close([callback])` - -Free up underlying resources. The `callback` function will be called with no arguments. If no callback is provided, a promise is returned. 
Closing the iterator is an idempotent operation, such that calling `close()` more than once is allowed and makes no difference. - -If a `next()` ,`nextv()` or `all()` call is in progress, closing will wait for that to finish. After `close()` has been called, further calls to `next()` ,`nextv()` or `all()` will yield an error with code [`LEVEL_ITERATOR_NOT_OPEN`](https://github.com/Level/abstract-level#errors). - -#### `iterator.db` - -A reference to the database that created this iterator. - -#### `iterator.count` - -Read-only getter that indicates how many keys have been yielded so far (by any method) excluding calls that errored or yielded `undefined`. - -#### `iterator.limit` - -Read-only getter that reflects the `limit` that was set in options. Greater than or equal to zero. Equals `Infinity` if no limit, which allows for easy math: - -```js -const hasMore = iterator.count < iterator.limit -const remaining = iterator.limit - iterator.count -``` - -### `keyIterator` - -A key iterator has the same interface as `iterator` except that its methods yield keys instead of entries. For the `keyIterator.next(callback)` method, this means that the `callback` will receive two arguments (an error and key) instead of three. Usage is otherwise the same. - -### `valueIterator` - -A value iterator has the same interface as `iterator` except that its methods yield values instead of entries. For the `valueIterator.next(callback)` method, this means that the `callback` will receive two arguments (an error and value) instead of three. Usage is otherwise the same. - -### `sublevel` - -A sublevel is an instance of the `AbstractSublevel` class (as found in [`abstract-level`](https://github.com/Level/abstract-level)) which extends `AbstractLevel` and thus has the same API as documented above, except for additional `classic-level` methods like `db.approximateSize()`. Sublevels have a few additional properties. - -#### `sublevel.prefix` - -Prefix of the sublevel. A read-only string property. - -```js -const example = db.sublevel('example') -const nested = example.sublevel('nested') - -console.log(example.prefix) // '!example!' -console.log(nested.prefix) // '!example!!nested!' -``` - -#### `sublevel.db` - -Parent database. A read-only property. - -```js -const example = db.sublevel('example') -const nested = example.sublevel('nested') - -console.log(example.db === db) // true -console.log(nested.db === db) // true -``` - -### `ClassicLevel.destroy(location[, callback])` +#### `ClassicLevel.destroy(location)` Completely remove an existing LevelDB database directory. You can use this method in place of a full directory removal if you want to be sure to only remove LevelDB-related files. If the directory only contains LevelDB files, the directory itself will be removed as well. If there are additional, non-LevelDB files in the directory, those files and the directory will be left alone. -The `callback` function will be called when the destroy operation is complete, with a possible error argument. If no callback is provided, a promise is returned. This method is an additional method that is not part of the [`abstract-level`](https://github.com/Level/abstract-level) interface. +Returns a promise for the completion of the destroy operation. 
Before calling `destroy()`, close a database if it's using the same `location`: @@ -731,13 +244,13 @@ await db.close() await ClassicLevel.destroy('./db') ``` -### `ClassicLevel.repair(location[, callback])` +#### `ClassicLevel.repair(location)` Attempt a restoration of a damaged database. It can also be used to perform a compaction of the LevelDB log into table files. From LevelDB documentation: > If a DB cannot be opened, you may attempt to call this method to resurrect as much of the contents of the database as possible. Some data may be lost, so be careful when calling this function on a database that contains important information. -The `callback` function will be called when the repair operation is complete, with a possible error argument. If no callback is provided, a promise is returned. This method is an additional method that is not part of the [`abstract-level`](https://github.com/Level/abstract-level) interface. +Returns a promise for the completion of the repair operation. You will find information on the repair operation in the `LOG` file inside the database directory. diff --git a/UPGRADING.md b/UPGRADING.md index c1d74bf..cd17567 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -2,6 +2,10 @@ This document describes breaking changes and how to upgrade. For a complete list of changes including minor and patch releases, please refer to the [changelog](CHANGELOG.md). +## 2.0.0 + +This release upgrades to `abstract-level` 2.0.0 which adds [hooks](https://github.com/Level/abstract-level#hooks) and drops callbacks, not-found errors and support of Node.js < 16. Please refer to the [upgrade guide of `abstract-level`](https://github.com/Level/abstract-level/blob/v2.0.0/UPGRADING.md). The only thing to add is that the additional methods of `classic-level` like `db.approximateSize()` also don't support callbacks anymore. + ## 1.0.0 **Introducing `classic-level`: a fork of [`leveldown`](https://github.com/Level/leveldown) that implements the [`abstract-level`](https://github.com/Level/abstract-level) interface instead of [`abstract-leveldown`](https://github.com/Level/abstract-leveldown). It thus has the same API as `level` and `levelup` including encodings, promises and events. In addition, you can now choose to use Uint8Array instead of Buffer. Sublevels are builtin.** diff --git a/binding.cc b/binding.cc index 015b283..6af0926 100644 --- a/binding.cc +++ b/binding.cc @@ -18,7 +18,6 @@ */ struct Database; struct Iterator; -static void iterator_close_do (napi_env env, Iterator* iterator, napi_value cb); static leveldb::Status threadsafe_open(const leveldb::Options &options, bool multithreading, Database &db_instance); @@ -56,6 +55,11 @@ static std::map db_handles; #define NAPI_RETURN_UNDEFINED() \ return 0; +#define NAPI_PROMISE() \ + napi_deferred deferred; \ + napi_value promise; \ + NAPI_STATUS_THROWS(napi_create_promise(env, &deferred, &promise)); + #define NAPI_UTF8_NEW(name, val) \ size_t name##_size = 0; \ NAPI_STATUS_THROWS(napi_get_value_string_utf8(env, val, NULL, 0, &name##_size)) \ @@ -94,6 +98,11 @@ static std::map db_handles; } \ } +/** + * Bit fields. + */ +#define STATE_ENDED 1 + /********************************************************************* * Helpers. ********************************************************************/ @@ -180,6 +189,20 @@ static bool BooleanProperty (napi_env env, napi_value obj, const char* key, return DEFAULT; } +/** + * Returns a boolean value. + * Returns 'DEFAULT' if the JS value is undefined or otherwise not a boolean. 
+ */ +static bool BooleanValue (napi_env env, napi_value value, bool DEFAULT) { + bool result; + + if (napi_get_value_bool(env, value, &result) == napi_ok) { + return result; + } else { + return DEFAULT; + } +} + enum Encoding { buffer, utf8, view }; /** @@ -202,6 +225,19 @@ static Encoding GetEncoding (napi_env env, napi_value options, const char* optio return Encoding::utf8; } +/** + * Returns internal Encoding enum by its equivalent numeric value. + */ +static Encoding GetEncoding (napi_env env, napi_value value) { + int32_t result; + + if (napi_get_value_int32(env, value, &result) == napi_ok) { + return static_cast<Encoding>(result); + } + + return Encoding::utf8; +} + /** * Returns a uint32 property 'key' from 'obj'. * Returns 'DEFAULT' if the property doesn't exist. @@ -311,18 +347,6 @@ static std::vector<std::string> KeyArray (napi_env env, napi_value arr) { return result; } -/** - * Calls a function. - */ -static napi_status CallFunction (napi_env env, - napi_value callback, - const int argc, - napi_value* argv) { - napi_value global; - napi_get_global(env, &global); - return napi_call_function(env, global, callback, argc, argv, NULL); -} - /** * Whether to yield entries, keys or values. */ @@ -379,23 +403,26 @@ struct Entry { * following virtual methods (listed in the order in which they're called): * * - DoExecute (abstract, worker pool thread): main work - * - HandleOKCallback (main thread): call JS callback on success - * - HandleErrorCallback (main thread): call JS callback on error + * - HandleOKCallback (main thread): resolve JS promise on success + * - HandleErrorCallback (main thread): reject JS promise on error * - DoFinally (main thread): do cleanup regardless of success */ struct BaseWorker { // Note: storing env is discouraged as we'd end up using it in unsafe places. BaseWorker (napi_env env, Database* database, - napi_value callback, + napi_deferred deferred, const char* resourceName) - : database_(database), errMsg_(NULL) { - NAPI_STATUS_THROWS_VOID(napi_create_reference(env, callback, 1, &callbackRef_)); + : database_(database), errMsg_(NULL), deferred_(deferred) { + // Note: napi_deferred is a strong reference to the JS promise, so there's no need to + // create a reference ourselves.
See `v8_deferred = new v8::Persistent<v8::Value>()` in: + // https://github.com/nodejs/node/commit/7efb8f7619100973877c660d0ee527ea3d92de8d + napi_value asyncResourceName; NAPI_STATUS_THROWS_VOID(napi_create_string_utf8(env, resourceName, NAPI_AUTO_LENGTH, &asyncResourceName)); - NAPI_STATUS_THROWS_VOID(napi_create_async_work(env, callback, + NAPI_STATUS_THROWS_VOID(napi_create_async_work(env, NULL, asyncResourceName, BaseWorker::Execute, BaseWorker::Complete, @@ -440,28 +467,29 @@ struct BaseWorker { } void DoComplete (napi_env env) { - napi_value callback; - napi_get_reference_value(env, callbackRef_, &callback); - if (status_.ok()) { - HandleOKCallback(env, callback); + HandleOKCallback(env, deferred_); } else { - HandleErrorCallback(env, callback); + HandleErrorCallback(env, deferred_); } } - virtual void HandleOKCallback (napi_env env, napi_value callback) { + virtual void HandleOKCallback (napi_env env, napi_deferred deferred) { napi_value argv; - napi_get_null(env, &argv); - CallFunction(env, callback, 1, &argv); + napi_get_undefined(env, &argv); + napi_resolve_deferred(env, deferred, argv); } - virtual void HandleErrorCallback (napi_env env, napi_value callback) { + virtual void HandleErrorCallback (napi_env env, napi_deferred deferred) { napi_value argv; if (status_.IsNotFound()) { - argv = CreateCodeError(env, "LEVEL_NOT_FOUND", errMsg_); - } else if (status_.IsCorruption()) { + napi_get_undefined(env, &argv); + napi_resolve_deferred(env, deferred, argv); + return; + } + + if (status_.IsCorruption()) { argv = CreateCodeError(env, "LEVEL_CORRUPTION", errMsg_); } else if (status_.IsIOError()) { if (strlen(errMsg_) > 15 && strncmp("IO error: lock ", errMsg_, 15) == 0) { // env_posix.cc @@ -475,13 +503,12 @@ struct BaseWorker { argv = CreateError(env, errMsg_); } - CallFunction(env, callback, 1, &argv); + napi_reject_deferred(env, deferred, argv); } virtual void DoFinally (napi_env env) { - napi_delete_reference(env, callbackRef_); napi_delete_async_work(env, asyncWork_); - + deferred_ = NULL; delete this; } @@ -492,7 +519,7 @@ struct BaseWorker { Database* database_; private: - napi_ref callbackRef_; + napi_deferred deferred_; napi_async_work asyncWork_; leveldb::Status status_; char *errMsg_; @@ -653,7 +680,7 @@ leveldb::Status threadsafe_open(const leveldb::Options &options, db_instance.db_ = handle.db; db_handles[db_instance.location_] = handle; } - + return status; } @@ -684,8 +711,8 @@ leveldb::Status threadsafe_close(Database &db_instance) { * Base worker class for doing async work that defers closing the database.
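 * A PriorityWorker increments the database's priority-work counter on
 * construction, and db_close() (further below) only queues its CloseWorker
 * once HasPriorityWork() reports that no such work is pending; until then
 * the close is parked on pendingCloseWorker_.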
*/ struct PriorityWorker : public BaseWorker { - PriorityWorker (napi_env env, Database* database, napi_value callback, const char* resourceName) - : BaseWorker(env, database, callback, resourceName) { + PriorityWorker (napi_env env, Database* database, napi_deferred deferred, const char* resourceName) + : BaseWorker(env, database, deferred, resourceName) { database_->IncrementPriorityWork(env); } @@ -904,7 +931,8 @@ struct Iterator final : public BaseIterator { const bool fillCache, const Encoding keyEncoding, const Encoding valueEncoding, - const uint32_t highWaterMarkBytes) + const uint32_t highWaterMarkBytes, + unsigned char* state) : BaseIterator(database, reverse, lt, lte, gt, gte, limit, fillCache), id_(id), keys_(keys), @@ -915,7 +943,9 @@ struct Iterator final : public BaseIterator { first_(true), nexting_(false), isClosing_(false), - closeWorker_(NULL), + aborted_(false), + ended_(false), + state_(state), ref_(NULL) { } @@ -937,7 +967,7 @@ struct Iterator final : public BaseIterator { size_t bytesRead = 0; leveldb::Slice empty; - while (true) { + while (!aborted_) { if (!first_) Next(); else first_ = false; @@ -963,6 +993,7 @@ struct Iterator final : public BaseIterator { } } + ended_ = true; return false; } @@ -975,7 +1006,9 @@ struct Iterator final : public BaseIterator { bool first_; bool nexting_; bool isClosing_; - BaseWorker* closeWorker_; + bool aborted_; + bool ended_; + unsigned char* state_; std::vector<Entry> cache_; private: @@ -990,7 +1023,7 @@ static void env_cleanup_hook (void* arg) { Database* database = (Database*)arg; - // Do everything that db_close() does but synchronously. We're expecting that GC + // Do everything that db.close() does but synchronously. We're expecting that GC did not (yet) collect the database because that would be a user mistake (not // closing their db) made during the lifetime of the environment. That's different // from an environment being torn down (like the main process or a worker thread) @@ -1047,7 +1080,7 @@ NAPI_METHOD(db_init) { struct OpenWorker final : public BaseWorker { OpenWorker (napi_env env, Database* database, - napi_value callback, + napi_deferred deferred, const std::string& location, const bool createIfMissing, const bool errorIfExists, @@ -1058,7 +1091,7 @@ struct OpenWorker final : public BaseWorker { const uint32_t maxOpenFiles, const uint32_t blockRestartInterval, const uint32_t maxFileSize) - : BaseWorker(env, database, callback, "classic_level.db.open"), + : BaseWorker(env, database, deferred, "classic_level.db.open"), location_(location), multithreading_(multithreading) { options_.block_cache = database->blockCache_; @@ -1090,9 +1123,10 @@ struct OpenWorker final : public BaseWorker { * Open a database.
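 * Called from JS as `binding.db_open(this[kContext], this[kLocation], options)`
 * (see the index.js changes further below); the promise created by
 * NAPI_PROMISE() is returned to JS and settled by OpenWorker.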
*/ NAPI_METHOD(db_open) { - NAPI_ARGV(4); + NAPI_ARGV(3); NAPI_DB_CONTEXT(); NAPI_ARGV_UTF8_NEW(location, 1); + NAPI_PROMISE(); napi_value options = argv[2]; const bool createIfMissing = BooleanProperty(env, options, "createIfMissing", true); @@ -1110,27 +1144,27 @@ NAPI_METHOD(db_open) { database->blockCache_ = leveldb::NewLRUCache(cacheSize); - napi_value callback = argv[3]; - OpenWorker* worker = new OpenWorker(env, database, callback, location, - createIfMissing, errorIfExists, - compression, multithreading, - writeBufferSize, blockSize, - maxOpenFiles, blockRestartInterval, - maxFileSize); + OpenWorker* worker = new OpenWorker( + env, database, deferred, location, + createIfMissing, errorIfExists, + compression, multithreading, + writeBufferSize, blockSize, + maxOpenFiles, blockRestartInterval, + maxFileSize + ); + worker->Queue(env); delete [] location; - NAPI_RETURN_UNDEFINED(); + return promise; } /** * Worker class for closing a database */ struct CloseWorker final : public BaseWorker { - CloseWorker (napi_env env, - Database* database, - napi_value callback) - : BaseWorker(env, database, callback, "classic_level.db.close") {} + CloseWorker (napi_env env, Database* database, napi_deferred deferred) + : BaseWorker(env, database, deferred, "classic_level.db.close") {} ~CloseWorker () {} @@ -1147,30 +1181,22 @@ napi_value noop_callback (napi_env env, napi_callback_info info) { * Close a database. */ NAPI_METHOD(db_close) { - NAPI_ARGV(2); + NAPI_ARGV(1); NAPI_DB_CONTEXT(); + NAPI_PROMISE(); + + // AbstractLevel should not call _close() before iterators are closed + assert(database->iterators_.size() == 0); - napi_value callback = argv[1]; - CloseWorker* worker = new CloseWorker(env, database, callback); + CloseWorker* worker = new CloseWorker(env, database, deferred); if (!database->HasPriorityWork()) { worker->Queue(env); - NAPI_RETURN_UNDEFINED(); + } else { + database->pendingCloseWorker_ = worker; } - database->pendingCloseWorker_ = worker; - - napi_value noop; - napi_create_function(env, NULL, 0, noop_callback, NULL, &noop); - - std::map<uint32_t, Iterator*> iterators = database->iterators_; - std::map<uint32_t, Iterator*>::iterator it; - - for (it = iterators.begin(); it != iterators.end(); ++it) { - iterator_close_do(env, it->second, noop); - } - - NAPI_RETURN_UNDEFINED(); + return promise; } /** @@ -1179,11 +1205,11 @@ NAPI_METHOD(db_close) { struct PutWorker final : public PriorityWorker { PutWorker (napi_env env, Database* database, - napi_value callback, + napi_deferred deferred, leveldb::Slice key, leveldb::Slice value, bool sync) - : PriorityWorker(env, database, callback, "classic_level.db.put"), + : PriorityWorker(env, database, deferred, "classic_level.db.put"), key_(key), value_(value) { options_.sync = sync; } @@ -1206,18 +1232,18 @@ struct PutWorker final : public PriorityWorker { * Puts a key and a value to a database.
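 * Called from JS as `binding.db_put(this[kContext], key, value, options)`;
 * the returned promise resolves with undefined once PutWorker has applied
 * the write.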
*/ NAPI_METHOD(db_put) { - NAPI_ARGV(5); + NAPI_ARGV(4); NAPI_DB_CONTEXT(); + NAPI_PROMISE(); leveldb::Slice key = ToSlice(env, argv[1]); leveldb::Slice value = ToSlice(env, argv[2]); bool sync = BooleanProperty(env, argv[3], "sync", false); - napi_value callback = argv[4]; - PutWorker* worker = new PutWorker(env, database, callback, key, value, sync); + PutWorker* worker = new PutWorker(env, database, deferred, key, value, sync); worker->Queue(env); - NAPI_RETURN_UNDEFINED(); + return promise; } /** @@ -1226,14 +1252,15 @@ NAPI_METHOD(db_put) { struct GetWorker final : public PriorityWorker { GetWorker (napi_env env, Database* database, - napi_value callback, + napi_deferred deferred, leveldb::Slice key, const Encoding encoding, const bool fillCache) - : PriorityWorker(env, database, callback, "classic_level.db.get"), + : PriorityWorker(env, database, deferred, "classic_level.db.get"), key_(key), encoding_(encoding) { options_.fill_cache = fillCache; + options_.snapshot = database->NewSnapshot(); } ~GetWorker () { @@ -1242,13 +1269,13 @@ struct GetWorker final : public PriorityWorker { void DoExecute () override { SetStatus(database_->Get(options_, key_, value_)); + database_->ReleaseSnapshot(options_.snapshot); } - void HandleOKCallback (napi_env env, napi_value callback) override { - napi_value argv[2]; - napi_get_null(env, &argv[0]); - Entry::Convert(env, &value_, encoding_, argv[1]); - CallFunction(env, callback, 2, argv); + void HandleOKCallback (napi_env env, napi_deferred deferred) override { + napi_value argv; + Entry::Convert(env, &value_, encoding_, argv); + napi_resolve_deferred(env, deferred, argv); } private: @@ -1264,18 +1291,18 @@ NAPI_METHOD(db_get) { NAPI_ARGV(4); NAPI_DB_CONTEXT(); + NAPI_PROMISE(); leveldb::Slice key = ToSlice(env, argv[1]); - napi_value options = argv[2]; - const Encoding encoding = GetEncoding(env, options, "valueEncoding"); - const bool fillCache = BooleanProperty(env, options, "fillCache", true); - napi_value callback = argv[3]; + const Encoding encoding = GetEncoding(env, argv[2]); + const bool fillCache = BooleanValue(env, argv[3], true); - GetWorker* worker = new GetWorker(env, database, callback, key, encoding, - fillCache); - worker->Queue(env); + GetWorker* worker = new GetWorker( + env, database, deferred, key, encoding, fillCache + ); - NAPI_RETURN_UNDEFINED(); + worker->Queue(env); + return promise; } /** @@ -1285,10 +1312,10 @@ struct GetManyWorker final : public PriorityWorker { GetManyWorker (napi_env env, Database* database, std::vector<std::string> keys, - napi_value callback, + napi_deferred deferred, const Encoding valueEncoding, const bool fillCache) - : PriorityWorker(env, database, callback, "classic_level.get.many"), + : PriorityWorker(env, database, deferred, "classic_level.get.many"), keys_(std::move(keys)), valueEncoding_(valueEncoding) { options_.fill_cache = fillCache; options_.snapshot = database->NewSnapshot(); @@ -1319,7 +1346,7 @@ struct GetManyWorker final : public PriorityWorker { database_->ReleaseSnapshot(options_.snapshot); } - void HandleOKCallback (napi_env env, napi_value callback) override { + void HandleOKCallback (napi_env env, napi_deferred deferred) override { size_t size = cache_.size(); napi_value array; napi_create_array_with_length(env, size, &array); @@ -1332,10 +1359,7 @@ struct GetManyWorker final : public PriorityWorker { if (value != NULL) delete value; } - napi_value argv[2]; - napi_get_null(env, &argv[0]); - argv[1] = array; - CallFunction(env, callback, 2,
argv); + napi_resolve_deferred(env, deferred, array); } private: @@ -1349,21 +1373,21 @@ struct GetManyWorker final : public PriorityWorker { * Gets many values from a database. */ NAPI_METHOD(db_get_many) { - NAPI_ARGV(4); + NAPI_ARGV(3); NAPI_DB_CONTEXT(); + NAPI_PROMISE(); const auto keys = KeyArray(env, argv[1]); napi_value options = argv[2]; const Encoding valueEncoding = GetEncoding(env, options, "valueEncoding"); const bool fillCache = BooleanProperty(env, options, "fillCache", true); - napi_value callback = argv[3]; GetManyWorker* worker = new GetManyWorker( - env, database, keys, callback, valueEncoding, fillCache + env, database, keys, deferred, valueEncoding, fillCache ); worker->Queue(env); - NAPI_RETURN_UNDEFINED(); + return promise; } /** @@ -1372,10 +1396,10 @@ NAPI_METHOD(db_get_many) { struct DelWorker final : public PriorityWorker { DelWorker (napi_env env, Database* database, - napi_value callback, + napi_deferred deferred, leveldb::Slice key, bool sync) - : PriorityWorker(env, database, callback, "classic_level.db.del"), + : PriorityWorker(env, database, deferred, "classic_level.db.del"), key_(key) { options_.sync = sync; } @@ -1396,17 +1420,17 @@ struct DelWorker final : public PriorityWorker { * Delete a value from a database. */ NAPI_METHOD(db_del) { - NAPI_ARGV(4); + NAPI_ARGV(3); NAPI_DB_CONTEXT(); + NAPI_PROMISE(); leveldb::Slice key = ToSlice(env, argv[1]); bool sync = BooleanProperty(env, argv[2], "sync", false); - napi_value callback = argv[3]; - DelWorker* worker = new DelWorker(env, database, callback, key, sync); + DelWorker* worker = new DelWorker(env, database, deferred, key, sync); worker->Queue(env); - NAPI_RETURN_UNDEFINED(); + return promise; } /** @@ -1415,14 +1439,14 @@ NAPI_METHOD(db_del) { struct ClearWorker final : public PriorityWorker { ClearWorker (napi_env env, Database* database, - napi_value callback, + napi_deferred deferred, const bool reverse, const int limit, std::string* lt, std::string* lte, std::string* gt, std::string* gte) - : PriorityWorker(env, database, callback, "classic_level.db.clear") { + : PriorityWorker(env, database, deferred, "classic_level.db.clear") { iterator_ = new BaseIterator(database, reverse, lt, lte, gt, gte, limit, false); writeOptions_ = new leveldb::WriteOptions(); writeOptions_->sync = false; @@ -1473,11 +1497,11 @@ struct ClearWorker final : public PriorityWorker { * Delete a range from a database. 
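 * ClearWorker (above) pairs a BaseIterator with batched deletes, so db_clear
 * honors the same range options as iterators: reverse, limit, lt, lte, gt
 * and gte.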
*/ NAPI_METHOD(db_clear) { - NAPI_ARGV(3); + NAPI_ARGV(2); NAPI_DB_CONTEXT(); + NAPI_PROMISE(); napi_value options = argv[1]; - napi_value callback = argv[2]; const bool reverse = BooleanProperty(env, options, "reverse", false); const int limit = Int32Property(env, options, "limit", -1); @@ -1487,10 +1511,12 @@ NAPI_METHOD(db_clear) { std::string* gt = RangeOption(env, options, "gt"); std::string* gte = RangeOption(env, options, "gte"); - ClearWorker* worker = new ClearWorker(env, database, callback, reverse, limit, lt, lte, gt, gte); - worker->Queue(env); + ClearWorker* worker = new ClearWorker( + env, database, deferred, reverse, limit, lt, lte, gt, gte + ); - NAPI_RETURN_UNDEFINED(); + worker->Queue(env); + return promise; } /** @@ -1499,10 +1525,10 @@ NAPI_METHOD(db_clear) { struct ApproximateSizeWorker final : public PriorityWorker { ApproximateSizeWorker (napi_env env, Database* database, - napi_value callback, + napi_deferred deferred, leveldb::Slice start, leveldb::Slice end) - : PriorityWorker(env, database, callback, "classic_level.db.approximate_size"), + : PriorityWorker(env, database, deferred, "classic_level.db.approximate_size"), start_(start), end_(end) {} ~ApproximateSizeWorker () { @@ -1515,11 +1541,10 @@ struct ApproximateSizeWorker final : public PriorityWorker { size_ = database_->ApproximateSize(&range); } - void HandleOKCallback (napi_env env, napi_value callback) override { - napi_value argv[2]; - napi_get_null(env, &argv[0]); - napi_create_int64(env, (uint64_t)size_, &argv[1]); - CallFunction(env, callback, 2, argv); + void HandleOKCallback (napi_env env, napi_deferred deferred) override { + napi_value argv; + napi_create_int64(env, (uint64_t)size_, &argv); + napi_resolve_deferred(env, deferred, argv); } leveldb::Slice start_; @@ -1531,20 +1556,19 @@ struct ApproximateSizeWorker final : public PriorityWorker { * Calculates the approximate size of a range in a database. */ NAPI_METHOD(db_approximate_size) { - NAPI_ARGV(4); + NAPI_ARGV(3); NAPI_DB_CONTEXT(); + NAPI_PROMISE(); leveldb::Slice start = ToSlice(env, argv[1]); leveldb::Slice end = ToSlice(env, argv[2]); - napi_value callback = argv[3]; + ApproximateSizeWorker* worker = new ApproximateSizeWorker( + env, database, deferred, start, end + ); - ApproximateSizeWorker* worker = new ApproximateSizeWorker(env, database, - callback, start, - end); worker->Queue(env); - - NAPI_RETURN_UNDEFINED(); + return promise; } /** @@ -1553,10 +1577,10 @@ NAPI_METHOD(db_approximate_size) { struct CompactRangeWorker final : public PriorityWorker { CompactRangeWorker (napi_env env, Database* database, - napi_value callback, + napi_deferred deferred, leveldb::Slice start, leveldb::Slice end) - : PriorityWorker(env, database, callback, "classic_level.db.compact_range"), + : PriorityWorker(env, database, deferred, "classic_level.db.compact_range"), start_(start), end_(end) {} ~CompactRangeWorker () { @@ -1576,18 +1600,19 @@ struct CompactRangeWorker final : public PriorityWorker { * Compacts a range in a database. 
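 * Called from JS as `binding.db_compact_range(this[kContext], start, end)`,
 * after index.js has encoded both keys with the configured keyEncoding.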
*/ NAPI_METHOD(db_compact_range) { - NAPI_ARGV(4); + NAPI_ARGV(3); NAPI_DB_CONTEXT(); + NAPI_PROMISE(); leveldb::Slice start = ToSlice(env, argv[1]); leveldb::Slice end = ToSlice(env, argv[2]); - napi_value callback = argv[3]; - CompactRangeWorker* worker = new CompactRangeWorker(env, database, callback, - start, end); - worker->Queue(env); + CompactRangeWorker* worker = new CompactRangeWorker( + env, database, deferred, start, end + ); - NAPI_RETURN_UNDEFINED(); + worker->Queue(env); + return promise; } /** @@ -1614,10 +1639,8 @@ NAPI_METHOD(db_get_property) { * Worker class for destroying a database. */ struct DestroyWorker final : public BaseWorker { - DestroyWorker (napi_env env, - const std::string& location, - napi_value callback) - : BaseWorker(env, NULL, callback, "classic_level.destroy_db"), + DestroyWorker (napi_env env, const std::string& location, napi_deferred deferred) + : BaseWorker(env, NULL, deferred, "classic_level.destroy_db"), location_(location) {} ~DestroyWorker () {} @@ -1634,26 +1657,23 @@ struct DestroyWorker final : public BaseWorker { * Destroys a database. */ NAPI_METHOD(destroy_db) { - NAPI_ARGV(2); + NAPI_ARGV(1); NAPI_ARGV_UTF8_NEW(location, 0); - napi_value callback = argv[1]; + NAPI_PROMISE(); - DestroyWorker* worker = new DestroyWorker(env, location, callback); + DestroyWorker* worker = new DestroyWorker(env, location, deferred); worker->Queue(env); delete [] location; - - NAPI_RETURN_UNDEFINED(); + return promise; } /** * Worker class for repairing a database. */ struct RepairWorker final : public BaseWorker { - RepairWorker (napi_env env, - const std::string& location, - napi_value callback) - : BaseWorker(env, NULL, callback, "classic_level.repair_db"), + RepairWorker (napi_env env, const std::string& location, napi_deferred deferred) + : BaseWorker(env, NULL, deferred, "classic_level.repair_db"), location_(location) {} ~RepairWorker () {} @@ -1670,16 +1690,16 @@ struct RepairWorker final : public BaseWorker { * Repairs a database. */ NAPI_METHOD(repair_db) { - NAPI_ARGV(2); + NAPI_ARGV(1); NAPI_ARGV_UTF8_NEW(location, 0); - napi_value callback = argv[1]; + NAPI_PROMISE(); - RepairWorker* worker = new RepairWorker(env, location, callback); + RepairWorker* worker = new RepairWorker(env, location, deferred); worker->Queue(env); delete [] location; - NAPI_RETURN_UNDEFINED(); + return promise; } /** @@ -1695,10 +1715,15 @@ static void FinalizeIterator (napi_env env, void* data, void* hint) { * Create an iterator. 
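 * argv[1] is a one-byte Uint8Array shared with the JS iterator (its kState
 * field in iterator.js); NextWorker sets the STATE_ENDED bit in it so that
 * the JS side can skip further iterator_nextv() calls once the iterator is
 * exhausted.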
*/ NAPI_METHOD(iterator_init) { - NAPI_ARGV(2); + NAPI_ARGV(3); NAPI_DB_CONTEXT(); - napi_value options = argv[1]; + unsigned char* state = 0; + size_t stateLength; + NAPI_STATUS_THROWS(napi_get_typedarray_info(env, argv[1], NULL, &stateLength, (void**)&state, NULL, NULL)); + assert(stateLength == 1); + + napi_value options = argv[2]; const bool reverse = BooleanProperty(env, options, "reverse", false); const bool keys = BooleanProperty(env, options, "keys", true); const bool values = BooleanProperty(env, options, "values", true); @@ -1716,7 +1741,8 @@ NAPI_METHOD(iterator_init) { const uint32_t id = database->currentIteratorId_++; Iterator* iterator = new Iterator(database, id, reverse, keys, values, limit, lt, lte, gt, gte, fillCache, - keyEncoding, valueEncoding, highWaterMarkBytes); + keyEncoding, valueEncoding, highWaterMarkBytes, + state); napi_value result; NAPI_STATUS_THROWS(napi_create_external(env, iterator, @@ -1737,12 +1763,13 @@ NAPI_METHOD(iterator_seek) { NAPI_ARGV(2); NAPI_ITERATOR_CONTEXT(); - if (iterator->isClosing_ || iterator->hasClosed_) { - NAPI_RETURN_UNDEFINED(); - } + // AbstractIterator should not call _seek() after _close() + assert(!iterator->isClosing_); + assert(!iterator->hasClosed_); leveldb::Slice target = ToSlice(env, argv[1]); iterator->first_ = true; + iterator->ended_ = false; iterator->Seek(target); DisposeSliceBuffer(target); @@ -1753,10 +1780,8 @@ NAPI_METHOD(iterator_seek) { * Worker class for closing an iterator */ struct CloseIteratorWorker final : public BaseWorker { - CloseIteratorWorker (napi_env env, - Iterator* iterator, - napi_value callback) - : BaseWorker(env, iterator->database_, callback, "classic_level.iterator.close"), + CloseIteratorWorker (napi_env env, Iterator* iterator, napi_deferred deferred) + : BaseWorker(env, iterator->database_, deferred, "classic_level.iterator.close"), iterator_(iterator) {} ~CloseIteratorWorker () {} @@ -1775,31 +1800,34 @@ struct CloseIteratorWorker final : public BaseWorker { }; /** - * Called by NAPI_METHOD(iterator_close) and also when closing - * open iterators during NAPI_METHOD(db_close). + * Closes an iterator. */ -static void iterator_close_do (napi_env env, Iterator* iterator, napi_value cb) { - if (!iterator->isClosing_ && !iterator->hasClosed_) { - CloseIteratorWorker* worker = new CloseIteratorWorker(env, iterator, cb); - iterator->isClosing_ = true; +NAPI_METHOD(iterator_close) { + NAPI_ARGV(1); + NAPI_ITERATOR_CONTEXT(); + NAPI_PROMISE(); - if (iterator->nexting_) { - iterator->closeWorker_ = worker; - } else { - worker->Queue(env); - } - } + // AbstractIterator should not call _close() more than once + assert(!iterator->isClosing_); + assert(!iterator->hasClosed_); + + // AbstractIterator should not call _close() while next() is in-progress + assert(!iterator->nexting_); + + CloseIteratorWorker* worker = new CloseIteratorWorker(env, iterator, deferred); + iterator->isClosing_ = true; + worker->Queue(env); + + return promise; } /** - * Closes an iterator. + * Aborts a NextWorker (if any, eventually). */ -NAPI_METHOD(iterator_close) { - NAPI_ARGV(2); +NAPI_METHOD(iterator_abort) { + NAPI_ARGV(1); NAPI_ITERATOR_CONTEXT(); - - iterator_close_do(env, iterator, argv[1]); - + iterator->aborted_ = true; NAPI_RETURN_UNDEFINED(); } @@ -1807,12 +1835,8 @@ NAPI_METHOD(iterator_close) { * Worker class for nexting an iterator. 
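 * If the iterator has been aborted via iterator_abort() (wired to an
 * AbortSignal in iterator.js), HandleOKCallback rejects the promise with a
 * LEVEL_ABORTED error named AbortError instead of resolving with entries.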
*/ struct NextWorker final : public BaseWorker { - NextWorker (napi_env env, - Iterator* iterator, - uint32_t size, - napi_value callback) - : BaseWorker(env, iterator->database_, callback, - "classic_level.iterator.next"), + NextWorker (napi_env env, Iterator* iterator, uint32_t size, napi_deferred deferred) + : BaseWorker(env, iterator->database_, deferred, "classic_level.iterator.next"), iterator_(iterator), size_(size), ok_() {} ~NextWorker () {} @@ -1829,7 +1853,16 @@ struct NextWorker final : public BaseWorker { } } - void HandleOKCallback (napi_env env, napi_value callback) override { + void HandleOKCallback (napi_env env, napi_deferred deferred) override { + if (iterator_->aborted_) { + napi_value err = CreateCodeError(env, "LEVEL_ABORTED", "Operation has been aborted"); + napi_value name; + napi_create_string_utf8(env, "AbortError", NAPI_AUTO_LENGTH, &name); + napi_set_named_property(env, err, "name", name); + napi_reject_deferred(env, deferred, err); + return; + } + size_t size = iterator_->cache_.size(); napi_value jsArray; napi_create_array_with_length(env, size, &jsArray); @@ -1843,22 +1876,16 @@ struct NextWorker final : public BaseWorker { napi_set_element(env, jsArray, idx, element); } - napi_value argv[3]; - napi_get_null(env, &argv[0]); - argv[1] = jsArray; - napi_get_boolean(env, !ok_, &argv[2]); - CallFunction(env, callback, 3, argv); + // TODO: use state_ internally too, replacing ended_? + if (iterator_->ended_) { + *iterator_->state_ |= STATE_ENDED; + } + + napi_resolve_deferred(env, deferred, jsArray); } void DoFinally (napi_env env) override { - // clean up & handle the next/close state iterator_->nexting_ = false; - - if (iterator_->closeWorker_ != NULL) { - iterator_->closeWorker_->Queue(env); - iterator_->closeWorker_ = NULL; - } - BaseWorker::DoFinally(env); } @@ -1872,26 +1899,29 @@ struct NextWorker final : public BaseWorker { * Advance repeatedly and get multiple entries at once. 
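 * Resolves with an array of entries. If the iterator already ended, it
 * resolves with an empty array without scheduling another NextWorker.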
*/ NAPI_METHOD(iterator_nextv) { - NAPI_ARGV(3); + NAPI_ARGV(2); NAPI_ITERATOR_CONTEXT(); + NAPI_PROMISE(); uint32_t size; NAPI_STATUS_THROWS(napi_get_value_uint32(env, argv[1], &size)); if (size == 0) size = 1; - napi_value callback = argv[2]; - - if (iterator->isClosing_ || iterator->hasClosed_) { - napi_value argv = CreateCodeError(env, "LEVEL_ITERATOR_NOT_OPEN", "Iterator is not open"); - NAPI_STATUS_THROWS(CallFunction(env, callback, 1, &argv)); - NAPI_RETURN_UNDEFINED(); + // AbstractIterator should not call _next() or _nextv() after _close() + assert(!iterator->isClosing_); + assert(!iterator->hasClosed_); + + if (iterator->ended_) { + napi_value empty; + napi_create_array_with_length(env, 0, &empty); + napi_resolve_deferred(env, deferred, empty); + } else { + NextWorker* worker = new NextWorker(env, iterator, size, deferred); + iterator->nexting_ = true; + worker->Queue(env); } - NextWorker* worker = new NextWorker(env, iterator, size, callback); - iterator->nexting_ = true; - worker->Queue(env); - - NAPI_RETURN_UNDEFINED(); + return promise; } /** @@ -1900,11 +1930,11 @@ NAPI_METHOD(iterator_nextv) { struct BatchWorker final : public PriorityWorker { BatchWorker (napi_env env, Database* database, - napi_value callback, + napi_deferred deferred, leveldb::WriteBatch* batch, const bool sync, const bool hasData) - : PriorityWorker(env, database, callback, "classic_level.batch.do"), + : PriorityWorker(env, database, deferred, "classic_level.batch.do"), batch_(batch), hasData_(hasData) { options_.sync = sync; } @@ -1929,12 +1959,12 @@ struct BatchWorker final : public PriorityWorker { * Does a batch write operation on a database. */ NAPI_METHOD(batch_do) { - NAPI_ARGV(4); + NAPI_ARGV(3); NAPI_DB_CONTEXT(); + NAPI_PROMISE(); napi_value array = argv[1]; const bool sync = BooleanProperty(env, argv[2], "sync", false); - napi_value callback = argv[3]; uint32_t length; napi_get_array_length(env, array, &length); @@ -1973,10 +2003,12 @@ NAPI_METHOD(batch_do) { } } - BatchWorker* worker = new BatchWorker(env, database, callback, batch, sync, hasData); - worker->Queue(env); + BatchWorker* worker = new BatchWorker( + env, database, deferred, batch, sync, hasData + ); - NAPI_RETURN_UNDEFINED(); + worker->Queue(env); + return promise; } /** @@ -2092,9 +2124,9 @@ struct BatchWriteWorker final : public PriorityWorker { BatchWriteWorker (napi_env env, napi_value context, Batch* batch, - napi_value callback, + napi_deferred deferred, const bool sync) - : PriorityWorker(env, batch->database_, callback, "classic_level.batch.write"), + : PriorityWorker(env, batch->database_, deferred, "classic_level.batch.write"), batch_(batch), sync_(sync) { // Prevent GC of batch object before we execute @@ -2124,17 +2156,19 @@ struct BatchWriteWorker final : public PriorityWorker { * Writes a batch object. 
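 * BatchWriteWorker (above) keeps a reference to the JS batch object so that
 * GC cannot collect it while the write is in flight; the promise resolves
 * once the underlying leveldb::WriteBatch has been written.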
*/ NAPI_METHOD(batch_write) { - NAPI_ARGV(3); + NAPI_ARGV(2); NAPI_BATCH_CONTEXT(); + NAPI_PROMISE(); napi_value options = argv[1]; const bool sync = BooleanProperty(env, options, "sync", false); - napi_value callback = argv[2]; - BatchWriteWorker* worker = new BatchWriteWorker(env, argv[0], batch, callback, sync); - worker->Queue(env); + BatchWriteWorker* worker = new BatchWriteWorker( + env, argv[0], batch, deferred, sync + ); - NAPI_RETURN_UNDEFINED(); + worker->Queue(env); + return promise; } /** @@ -2160,6 +2194,7 @@ NAPI_INIT() { NAPI_EXPORT_FUNCTION(iterator_seek); NAPI_EXPORT_FUNCTION(iterator_close); NAPI_EXPORT_FUNCTION(iterator_nextv); + NAPI_EXPORT_FUNCTION(iterator_abort); NAPI_EXPORT_FUNCTION(batch_do); NAPI_EXPORT_FUNCTION(batch_init); diff --git a/chained-batch.js b/chained-batch.js index ac343f1..b464dd4 100644 --- a/chained-batch.js +++ b/chained-batch.js @@ -23,13 +23,12 @@ class ChainedBatch extends AbstractChainedBatch { binding.batch_clear(this[kContext]) } - _write (options, callback) { - binding.batch_write(this[kContext], options, callback) + async _write (options) { + return binding.batch_write(this[kContext], options) } - _close (callback) { + async _close () { // TODO: close native batch (currently done on GC) - process.nextTick(callback) } } diff --git a/index.d.ts b/index.d.ts index 4f3cfb0..3d1398b 100644 --- a/index.d.ts +++ b/index.d.ts @@ -16,8 +16,7 @@ import { AbstractKeyIteratorOptions, AbstractValueIterator, AbstractValueIteratorOptions, - Transcoder, - NodeCallback + Transcoder } from 'abstract-level' /** @@ -48,33 +47,21 @@ declare class ClassicLevel open (): Promise open (options: OpenOptions): Promise - open (callback: NodeCallback): void - open (options: OpenOptions, callback: NodeCallback): void get (key: KDefault): Promise - get (key: KDefault, callback: NodeCallback): void get (key: K, options: GetOptions): Promise - get (key: K, options: GetOptions, callback: NodeCallback): void getMany (keys: KDefault[]): Promise - getMany (keys: KDefault[], callback: NodeCallback): void getMany (keys: K[], options: GetManyOptions): Promise - getMany (keys: K[], options: GetManyOptions, callback: NodeCallback): void put (key: KDefault, value: VDefault): Promise - put (key: KDefault, value: VDefault, callback: NodeCallback): void put (key: K, value: V, options: PutOptions): Promise - put (key: K, value: V, options: PutOptions, callback: NodeCallback): void del (key: KDefault): Promise - del (key: KDefault, callback: NodeCallback): void del (key: K, options: DelOptions): Promise - del (key: K, options: DelOptions, callback: NodeCallback): void batch (operations: Array>): Promise - batch (operations: Array>, callback: NodeCallback): void batch (operations: Array>, options: BatchOptions): Promise - batch (operations: Array>, options: BatchOptions, callback: NodeCallback): void batch (): ChainedBatch iterator (): Iterator @@ -91,17 +78,13 @@ declare class ClassicLevel * `[start..end)`. */ approximateSize (start: KDefault, end: KDefault): Promise - approximateSize (start: KDefault, end: KDefault, callback: NodeCallback): void approximateSize (start: K, end: K, options: StartEndOptions): Promise - approximateSize (start: K, end: K, options: StartEndOptions, callback: NodeCallback): void /** * Manually trigger a database compaction in the range `[start..end)`. 
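 * For example: `await db.compactRange('a', 'z')`.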
*/ compactRange (start: KDefault, end: KDefault): Promise - compactRange (start: KDefault, end: KDefault, callback: NodeCallback): void compactRange (start: K, end: K, options: StartEndOptions): Promise - compactRange (start: K, end: K, options: StartEndOptions, callback: NodeCallback): void /** * Get internal details from LevelDB. @@ -113,14 +96,12 @@ declare class ClassicLevel * place of a full directory removal to only remove LevelDB-related files. */ static destroy (location: string): Promise - static destroy (location: string, callback: NodeCallback): void /** * Attempt a restoration of a damaged database. Can also be used to perform * a compaction of the LevelDB log into table files. */ static repair (location: string): Promise - static repair (location: string, callback: NodeCallback): void } /** @@ -311,8 +292,6 @@ export interface ChainedBatchWriteOptions extends AbstractChainedBatchWriteOptio export class ChainedBatch extends AbstractChainedBatch { write (): Promise write (options: ChainedBatchWriteOptions): Promise - write (callback: NodeCallback): void - write (options: ChainedBatchWriteOptions, callback: NodeCallback): void } /** diff --git a/index.js b/index.js index 32a098d..4d624b8 100644 --- a/index.js +++ b/index.js @@ -2,25 +2,16 @@ const { AbstractLevel } = require('abstract-level') const ModuleError = require('module-error') -const { fromCallback } = require('catering') -const fs = require('fs') +const fsp = require('fs/promises') const binding = require('./binding') const { ChainedBatch } = require('./chained-batch') const { Iterator } = require('./iterator') -const kPromise = Symbol('promise') const kContext = Symbol('context') const kLocation = Symbol('location') class ClassicLevel extends AbstractLevel { constructor (location, options, _) { - // To help migrating to abstract-level - if (typeof options === 'function' || typeof _ === 'function') { - throw new ModuleError('The levelup-style callback argument has been removed', { - code: 'LEVEL_LEGACY' - }) - } - if (typeof location !== 'string' || location === '') { throw new TypeError("The first argument 'location' must be a non-empty string") } @@ -37,6 +28,9 @@ class ClassicLevel extends AbstractLevel { additionalMethods: { approximateSize: true, compactRange: true + }, + signals: { + iterators: true } }, options) @@ -48,103 +42,91 @@ class ClassicLevel extends AbstractLevel { return this[kLocation] } - _open (options, callback) { + async _open (options) { if (options.createIfMissing) { - fs.mkdir(this[kLocation], { recursive: true }, (err) => { - if (err) return callback(err) - binding.db_open(this[kContext], this[kLocation], options, callback) - }) - } else { - binding.db_open(this[kContext], this[kLocation], options, callback) + await fsp.mkdir(this[kLocation], { recursive: true }) } + + return binding.db_open(this[kContext], this[kLocation], options) } - _close (callback) { - binding.db_close(this[kContext], callback) + async _close () { + return binding.db_close(this[kContext]) } - _put (key, value, options, callback) { - binding.db_put(this[kContext], key, value, options, callback) + async _put (key, value, options) { + return binding.db_put(this[kContext], key, value, options) } - _get (key, options, callback) { - binding.db_get(this[kContext], key, options, callback) + async _get (key, options) { + return binding.db_get( + this[kContext], + key, + encodingEnum(options.valueEncoding), + options.fillCache + ) } - _getMany (keys, options, callback) { - binding.db_get_many(this[kContext], keys, options, 
callback) + async _getMany (keys, options) { + return binding.db_get_many(this[kContext], keys, options) } - _del (key, options, callback) { - binding.db_del(this[kContext], key, options, callback) + async _del (key, options) { + return binding.db_del(this[kContext], key, options) } - _clear (options, callback) { - binding.db_clear(this[kContext], options, callback) + async _clear (options) { + return binding.db_clear(this[kContext], options) } _chainedBatch () { return new ChainedBatch(this, this[kContext]) } - _batch (operations, options, callback) { - binding.batch_do(this[kContext], operations, options, callback) + async _batch (operations, options) { + return binding.batch_do(this[kContext], operations, options) } - approximateSize (start, end, options, callback) { - if (arguments.length < 2 || typeof start === 'function' || typeof end === 'function') { + async approximateSize (start, end, options) { + if (arguments.length < 2) { throw new TypeError("The arguments 'start' and 'end' are required") - } else if (typeof options === 'function') { - callback = options - options = null } else if (typeof options !== 'object') { options = null } - callback = fromCallback(callback, kPromise) - if (this.status === 'opening') { - this.defer(() => this.approximateSize(start, end, options, callback)) + return this.deferAsync(() => this.approximateSize(start, end, options)) } else if (this.status !== 'open') { - this.nextTick(callback, new ModuleError('Database is not open: cannot call approximateSize()', { + throw new ModuleError('Database is not open: cannot call approximateSize()', { code: 'LEVEL_DATABASE_NOT_OPEN' - })) + }) } else { const keyEncoding = this.keyEncoding(options && options.keyEncoding) start = keyEncoding.encode(start) end = keyEncoding.encode(end) - binding.db_approximate_size(this[kContext], start, end, callback) + return binding.db_approximate_size(this[kContext], start, end) } - - return callback[kPromise] } - compactRange (start, end, options, callback) { - if (arguments.length < 2 || typeof start === 'function' || typeof end === 'function') { + async compactRange (start, end, options) { + if (arguments.length < 2) { throw new TypeError("The arguments 'start' and 'end' are required") - } else if (typeof options === 'function') { - callback = options - options = null } else if (typeof options !== 'object') { options = null } - callback = fromCallback(callback, kPromise) - if (this.status === 'opening') { - this.defer(() => this.compactRange(start, end, options, callback)) + return this.deferAsync(() => this.compactRange(start, end, options)) } else if (this.status !== 'open') { - this.nextTick(callback, new ModuleError('Database is not open: cannot call compactRange()', { + throw new ModuleError('Database is not open: cannot call compactRange()', { code: 'LEVEL_DATABASE_NOT_OPEN' - })) + }) } else { const keyEncoding = this.keyEncoding(options && options.keyEncoding) start = keyEncoding.encode(start) end = keyEncoding.encode(end) - binding.db_compact_range(this[kContext], start, end, callback) + return binding.db_compact_range(this[kContext], start, end) } - - return callback[kPromise] } getProperty (property) { @@ -166,25 +148,30 @@ class ClassicLevel extends AbstractLevel { return new Iterator(this, this[kContext], options) } - static destroy (location, callback) { + static async destroy (location) { if (typeof location !== 'string' || location === '') { throw new TypeError("The first argument 'location' must be a non-empty string") } - callback = fromCallback(callback, 
kPromise) - binding.destroy_db(location, callback) - return callback[kPromise] + return binding.destroy_db(location) } - static repair (location, callback) { + static async repair (location) { if (typeof location !== 'string' || location === '') { throw new TypeError("The first argument 'location' must be a non-empty string") } - callback = fromCallback(callback, kPromise) - binding.repair_db(location, callback) - return callback[kPromise] + return binding.repair_db(location) } } exports.ClassicLevel = ClassicLevel + +// It's faster to read options in JS than to pass options objects to C++. +const encodingEnum = function (encoding) { + if (encoding === 'buffer') return 0 + if (encoding === 'utf8') return 1 + + /* istanbul ignore else: should not happen */ + if (encoding === 'view') return 2 +} diff --git a/iterator.js b/iterator.js index 26212c7..cc8d29d 100644 --- a/iterator.js +++ b/iterator.js @@ -5,14 +5,16 @@ const binding = require('./binding') const kContext = Symbol('context') const kCache = Symbol('cache') -const kFinished = Symbol('finished') const kFirst = Symbol('first') const kPosition = Symbol('position') -const kHandleNext = Symbol('handleNext') -const kHandleNextv = Symbol('handleNextv') -const kCallback = Symbol('callback') +const kState = Symbol('state') +const kSignal = Symbol('signal') +const kAbort = Symbol('abort') const empty = [] +// Bit fields +const STATE_ENDED = 1 + // Does not implement _all() because the default implementation // of abstract-level falls back to nextv(1000) and using all() // on more entries than that probably isn't a realistic use case, @@ -22,79 +24,92 @@ class Iterator extends AbstractIterator { constructor (db, context, options) { super(db, options) - this[kContext] = binding.iterator_init(context, options) - this[kHandleNext] = this[kHandleNext].bind(this) - this[kHandleNextv] = this[kHandleNextv].bind(this) - this[kCallback] = null + this[kState] = new Uint8Array(1) + this[kContext] = binding.iterator_init(context, this[kState], options) this[kFirst] = true this[kCache] = empty - this[kFinished] = false this[kPosition] = 0 + this[kAbort] = this[kAbort].bind(this) + + // TODO: consider exposing iterator.signal in abstract-level + if (options.signal != null) { + this[kSignal] = options.signal + this[kSignal].addEventListener('abort', this[kAbort], { once: true }) + } else { + this[kSignal] = null + } } _seek (target, options) { this[kFirst] = true this[kCache] = empty - this[kFinished] = false + this[kState][0] &= ~STATE_ENDED // Unset this[kPosition] = 0 binding.iterator_seek(this[kContext], target) } - _next (callback) { + async _next () { if (this[kPosition] < this[kCache].length) { - const entry = this[kCache][this[kPosition]++] - process.nextTick(callback, null, entry[0], entry[1]) - } else if (this[kFinished]) { - process.nextTick(callback) + return this[kCache][this[kPosition]++] + } + + // Avoid iterator_nextv() call if end was already reached + if ((this[kState][0] & STATE_ENDED) !== 0) { + return undefined + } + + if (this[kFirst]) { + // It's common to only want one entry initially or after a seek() + this[kFirst] = false + this[kCache] = await binding.iterator_nextv(this[kContext], 1) + this[kPosition] = 0 } else { - this[kCallback] = callback - - if (this[kFirst]) { - // It's common to only want one entry initially or after a seek() - this[kFirst] = false - binding.iterator_nextv(this[kContext], 1, this[kHandleNext]) - } else { - // Limit the size of the cache to prevent starving the event loop - // while we're recursively 
calling process.nextTick(). - binding.iterator_nextv(this[kContext], 1000, this[kHandleNext]) - } + // Limit the size of the cache to prevent starving the event loop + // while we're recursively nexting. + this[kCache] = await binding.iterator_nextv(this[kContext], 1000) + this[kPosition] = 0 + } + + if (this[kPosition] < this[kCache].length) { + return this[kCache][this[kPosition]++] } } - [kHandleNext] (err, items, finished) { - const callback = this[kCallback] - if (err) return callback(err) + async _nextv (size, options) { + this[kFirst] = false - this[kCache] = items - this[kFinished] = finished - this[kPosition] = 0 + // If next() was called then empty the cache first + if (this[kPosition] < this[kCache].length) { + const length = Math.min(size, this[kCache].length - this[kPosition]) + const chunk = this[kCache].slice(this[kPosition], this[kPosition] + length) - this._next(callback) - } + this[kPosition] += length + return chunk + } - _nextv (size, options, callback) { - if (this[kFinished]) { - process.nextTick(callback, null, []) - } else { - this[kCallback] = callback - this[kFirst] = false - binding.iterator_nextv(this[kContext], size, this[kHandleNextv]) + // Avoid iterator_nextv() call if end was already reached + if ((this[kState][0] & STATE_ENDED) !== 0) { + return [] } - } - [kHandleNextv] (err, items, finished) { - const callback = this[kCallback] - if (err) return callback(err) - this[kFinished] = finished - callback(null, items) + return binding.iterator_nextv(this[kContext], size) } - _close (callback) { + async _close () { this[kCache] = empty - this[kCallback] = null - binding.iterator_close(this[kContext], callback) + if (this[kSignal] !== null) { + this[kSignal].removeEventListener('abort', this[kAbort]) + this[kSignal] = null + } + + return binding.iterator_close(this[kContext]) + } + + [kAbort] () { + this[kSignal] = null + binding.iterator_abort(this[kContext]) } // Undocumented, exposed for tests only diff --git a/package.json b/package.json index b8df8ac..e609682 100644 --- a/package.json +++ b/package.json @@ -7,7 +7,8 @@ "types": "./index.d.ts", "scripts": { "install": "node-gyp-build", - "test": "standard && (nyc -s tape test/*-test.js | faucet) && nyc report", + "test": "standard && (nyc -s tape test/*-test.js | tap-arc) && nyc report", + "test-pessimistic": "tape test/*-test.js | tap-arc -pv", "test-gc": "node --expose-gc test/gc.js", "test-electron": "electron test/electron.js", "test-prebuild": "cross-env PREBUILDS_ONLY=1 npm t", @@ -26,8 +27,7 @@ "prebuild-win32-x64": "prebuildify -t 8.14.0 --napi --strip" }, "dependencies": { - "abstract-level": "^1.0.2", - "catering": "^2.1.0", + "abstract-level": "^2.0.0", "module-error": "^1.0.1", "napi-macros": "^2.2.2", "node-gyp-build": "^4.3.0" @@ -35,13 +35,10 @@ "devDependencies": { "@types/node": "^18.0.0", "@voxpelli/tsconfig": "^4.0.0", - "async-each": "^1.0.3", "cross-env": "^7.0.3", - "delayed": "^2.0.0", "dependency-check": "^4.1.0", "du": "^1.0.0", - "electron": "^21.0.1", - "faucet": "^0.0.3", + "electron": "^18.3.15", "glob": "^8.0.1", "hallmark": "^4.1.0", "mkfiletree": "^2.0.0", @@ -53,6 +50,7 @@ "readfiletree": "^1.0.0", "rimraf": "^3.0.0", "standard": "^17.0.0", + "tap-arc": "^0.3.5", "tape": "^5.5.0", "tempy": "^1.0.1", "typescript": "^4.5.5" @@ -68,6 +66,6 @@ "level" ], "engines": { - "node": ">=12" + "node": ">=16" } } diff --git a/test/approximate-size-test.js b/test/approximate-size-test.js index a4e242e..152e879 100644 --- a/test/approximate-size-test.js +++ b/test/approximate-size-test.js @@ 
-2,65 +2,59 @@ const test = require('tape') const testCommon = require('./common') -const noop = () => {} let db -test('setUp db', function (t) { +test('approximateSize() setup', async function (t) { db = testCommon.factory() - db.open(t.end.bind(t)) + return db.open() }) -test('test approximateSize() throws if arguments are missing', function (t) { - for (const args of [[], ['foo'], [noop], ['foo', noop]]) { - t.throws(() => db.approximateSize(...args), { - name: 'TypeError', - message: "The arguments 'start' and 'end' are required" - }) +test('approximateSize() throws if arguments are missing', async function (t) { + t.plan(2 * 2) + + for (const args of [[], ['foo']]) { + try { + await db.approximateSize(...args) + } catch (err) { + t.is(err.name, 'TypeError') + t.is(err.message, "The arguments 'start' and 'end' are required") + } } - t.end() }) -test('test approximateSize()', function (t) { +test('approximateSize()', async function (t) { const data = Array.apply(null, Array(10000)).map(function () { return 'aaaaaaaaaa' }).join('') - db.batch(Array.apply(null, Array(10)).map(function (x, i) { + await db.batch(Array.apply(null, Array(10)).map(function (x, i) { return { type: 'put', key: 'foo' + i, value: data } - }), function (err) { - t.error(err) - - // cycle open/close to ensure a pack to .sst + })) - db.close(function (err) { - t.error(err) - - db.open(function (err) { - t.error(err) + // cycle open/close to ensure a pack to .sst + await db.close() + await db.open() - db.approximateSize('!', '~', function (err, size) { - t.error(err) + const size = await db.approximateSize('!', '~') - t.equal(typeof size, 'number') - // account for snappy compression, original would be ~100000 - t.ok(size > 40000, 'size reports a reasonable amount (' + size + ')') - t.end() - }) - }) - }) - }) + t.equal(typeof size, 'number') + // account for snappy compression, original would be ~100000 + t.ok(size > 40000, 'size reports a reasonable amount (' + size + ')') }) -test('tearDown', function (t) { - db.close(t.end.bind(t)) +test('approximateSize() teardown', async function (t) { + return db.close() }) -test('test approximateSize() yields error if db is closed', function (t) { - db.approximateSize('foo', 'foo', function (err) { - t.is(err && err.code, 'LEVEL_DATABASE_NOT_OPEN') - t.end() - }) +test('approximateSize() yields error if db is closed', async function (t) { + t.plan(1) + + try { + await db.approximateSize('foo', 'foo') + } catch (err) { + t.is(err.code, 'LEVEL_DATABASE_NOT_OPEN') + } }) test('test approximateSize() is deferred', async function (t) { diff --git a/test/chained-batch-gc-test.js b/test/chained-batch-gc-test.js index 1981efc..4f05de9 100644 --- a/test/chained-batch-gc-test.js +++ b/test/chained-batch-gc-test.js @@ -5,33 +5,28 @@ const testCommon = require('./common') // When we have a chained batch object without a reference, V8 might GC it // before we get a chance to (asynchronously) write the batch. 
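// The rewritten test below therefore keeps only the promise returned by
// write(): it drops its reference to the batch, optionally forces GC, and
// then awaits that promise.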
-test('chained batch without ref does not get GCed before write', function (t) { - t.plan(2) - +test('chained batch without ref does not get GCed before write', async function (t) { const db = testCommon.factory() + await db.open() - db.open(function (err) { - t.ifError(err, 'no open error') + let batch = db.batch() - let batch = db.batch() + for (let i = 0; i < 1e3; i++) { + batch.put(String(i), 'value') + } - for (let i = 0; i < 1e3; i++) { - batch.put(String(i), 'value') - } + // The sync option makes the operation slower and thus more likely to + // cause a segfault (if the batch were to be GC-ed before it is written). + const promise = batch.write({ sync: true }) - // The sync option makes the operation slower and thus more likely to - // cause a segfault (if the batch were to be GC-ed before it is written). - batch.write({ sync: true }, function (err) { - t.ifError(err, 'no error from write()') - }) + // Remove reference + batch = null - // Remove reference - batch = null + if (global.gc) { + // This is the reliable way to trigger GC (and the bug if it exists). + // Useful for manual testing with "node --expose-gc". + global.gc() + } - if (global.gc) { - // This is the reliable way to trigger GC (and the bug if it exists). - // Useful for manual testing with "node --expose-gc". - global.gc() - } - }) + return promise }) diff --git a/test/cleanup-hanging-iterators-test.js b/test/cleanup-hanging-iterators-test.js index dc62788..c7d6d18 100644 --- a/test/cleanup-hanging-iterators-test.js +++ b/test/cleanup-hanging-iterators-test.js @@ -3,116 +3,142 @@ const makeTest = require('./make') const repeats = 200 -makeTest('test closed iterator', function (db, t, done) { +makeTest('closed iterator', async function (db, t) { // First test normal and proper usage: calling it.close() before db.close() const it = db.iterator() - - it.next(function (err, key, value) { - t.ifError(err, 'no error from next()') - t.equal(key, 'one', 'correct key') - t.equal(value, '1', 'correct value') - it.close(function (err) { - t.ifError(err, 'no error from close()') - done() - }) - }) + t.same(await it.next(), ['a', '1'], 'correct entry') + await it.close() + return db.close() }) -makeTest('test likely-closed iterator', function (db, t, done) { +makeTest('likely-closed iterator', async function (db, t) { // Test improper usage: not calling it.close() before db.close(). Cleanup of the // database will crash Node if not handled properly. const it = db.iterator() - it.next(function (err, key, value) { - t.ifError(err, 'no error from next()') - t.equal(key, 'one', 'correct key') - t.equal(value, '1', 'correct value') - done() - }) + // Two calls are needed to populate the cache + t.same(await it.next(), ['a', '1'], 'correct entry (1)') + t.same(await it.next(), ['b', '2'], 'correct entry (2)') + + return db.close() }) -makeTest('test non-closed iterator', function (db, t, done) { +makeTest('non-closed iterator', async function (db, t) { // Same as the test above but with a highWaterMarkBytes of 0 so that we don't // preemptively fetch all records, to ensure that the iterator is still // active when we (attempt to) close the database. 
const it = db.iterator({ highWaterMarkBytes: 0 }) - it.next(function (err, key, value) { - t.ifError(err, 'no error from next()') - t.equal(key, 'one', 'correct key') - t.equal(value, '1', 'correct value') - done() - }) + t.same(await it.next(), ['a', '1'], 'correct entry (1)') + t.same(await it.next(), ['b', '2'], 'correct entry (2)') + + return db.close() }) -makeTest('test multiple likely-closed iterators', function (db, t, done) { +makeTest('non-closed iterator without caching', async function (db, t) { + const it = db.iterator({ highWaterMarkBytes: 0 }) + t.same(await it.next(), ['a', '1'], 'correct entry (1)') + return db.close() +}) + +makeTest('multiple likely-closed iterators', async function (db, t) { // Same as the test above but repeated and with an extra iterator that is not - // nexting, which means its EndWorker will be executed almost immediately. + // nexting, which means its CloseWorker will be executed almost immediately. for (let i = 0; i < repeats; i++) { db.iterator() - db.iterator().next(function () {}) + db.iterator().next() } - setTimeout(done, Math.floor(Math.random() * 50)) + // Avoid async/await to avoid introducing an extra tick + return new Promise((resolve, reject) => { + setTimeout(() => { + db.close().then(resolve, reject) + }, Math.floor(Math.random() * 50)) + }) }) -makeTest('test multiple non-closed iterators', function (db, t, done) { +makeTest('multiple non-closed iterators', async function (db, t) { // Same as the test above but with a highWaterMarkBytes of 0. for (let i = 0; i < repeats; i++) { db.iterator({ highWaterMarkBytes: 0 }) - db.iterator({ highWaterMarkBytes: 0 }).next(function () {}) + db.iterator({ highWaterMarkBytes: 0 }).next() } - setTimeout(done, Math.floor(Math.random() * 50)) + // Avoid async/await to avoid introducing an extra tick + return new Promise((resolve, reject) => { + setTimeout(() => { + db.close().then(resolve, reject) + }, Math.floor(Math.random() * 50)) + }) }) -global.gc && makeTest('test multiple non-closed iterators with forced gc', function (db, t, done) { +global.gc && makeTest('multiple non-closed iterators with forced gc', async function (db, t) { // Same as the test above but with forced GC, to test that the lifespan of an // iterator is tied to *both* its JS object and whether the iterator was closed. for (let i = 0; i < repeats; i++) { db.iterator({ highWaterMarkBytes: 0 }) - db.iterator({ highWaterMarkBytes: 0 }).next(function () {}) + db.iterator({ highWaterMarkBytes: 0 }).next() } - setTimeout(function () { - global.gc() - done() - }, Math.floor(Math.random() * 50)) + // Avoid async/await to avoid introducing an extra tick + return new Promise((resolve, reject) => { + setTimeout(() => { + global.gc() + db.close().then(resolve, reject) + }, Math.floor(Math.random() * 50)) + }) }) -makeTest('test closing iterators', function (db, t, done) { - // At least one end() should be in progress when we try to close the db. - const it1 = db.iterator() - it1.next(function () { - it1.close(function () {}) - }) - const it2 = db.iterator() - it2.next(function () { - it2.close(function () {}) - done() +makeTest('closing iterators', async function (db, t) { + return new Promise((resolve, reject) => { + // At least one close() should be in progress when we try to close the db. 
+ const it1 = db.iterator() + it1.next().then(function () { + it1.close() + }) + + const it2 = db.iterator() + it2.next().then(function () { + it2.close() + db.close().then(resolve, reject) + }) }) }) -makeTest('test recursive next', function (db, t, done) { +makeTest('recursive next', async function (db, t) { // Test that we're able to close when user keeps scheduling work const it = db.iterator({ highWaterMarkBytes: 0 }) - it.next(function loop (err, key) { - if (err && err.code !== 'LEVEL_ITERATOR_NOT_OPEN') throw err - if (key !== undefined) it.next(loop) - }) + function resolve (entry) { + if (entry !== undefined) it.next().then(resolve, reject) + } + + function reject (err) { + if (err.code !== 'LEVEL_ITERATOR_NOT_OPEN') throw err + } - done() + it.next().then(resolve, reject) + return db.close() }) -makeTest('test recursive next (random)', function (db, t, done) { +makeTest('recursive next (random)', async function (db, t) { // Same as the test above but closing at a random time const it = db.iterator({ highWaterMarkBytes: 0 }) - it.next(function loop (err, key) { - if (err && err.code !== 'LEVEL_ITERATOR_NOT_OPEN') throw err - if (key !== undefined) it.next(loop) - }) + function resolve (entry) { + if (entry !== undefined) it.next().then(resolve, reject) + } + + function reject (err) { + if (err.code !== 'LEVEL_ITERATOR_NOT_OPEN') throw err + } + + it.next().then(resolve, reject) - setTimeout(done, Math.floor(Math.random() * 50)) + // Avoid async/await to avoid introducing an extra tick + return new Promise((resolve, reject) => { + setTimeout(() => { + db.close().then(resolve, reject) + }, Math.floor(Math.random() * 50)) + }) }) diff --git a/test/clear-gc-test.js b/test/clear-gc-test.js index 2794878..abfc997 100644 --- a/test/clear-gc-test.js +++ b/test/clear-gc-test.js @@ -12,36 +12,22 @@ for (let i = 0; i < 1e3; i++) { }) } -test('db without ref does not get GCed while clear() is in progress', function (t) { - t.plan(4) - +test('db without ref does not get GCed while clear() is in progress', async function (t) { let db = testCommon.factory() - db.open(function (err) { - t.ifError(err, 'no open error') - - // Insert test data - db.batch(sourceData.slice(), function (err) { - t.ifError(err, 'no batch error') + await db.open() + await db.batch(sourceData.slice()) - // Start async work - db.clear(function () { - t.pass('got callback') + // Start async work + const promise = db.clear() - // Give GC another chance to run, to rule out other issues. - setImmediate(function () { - if (global.gc) global.gc() - t.pass() - }) - }) + // Remove reference. The db should not get garbage collected + // until after the clear() callback, thanks to a napi_ref. + db = null - // Remove reference. The db should not get garbage collected - // until after the clear() callback, thanks to a napi_ref. - db = null + // Useful for manual testing with "node --expose-gc". + // The pending tap assertion may also allow GC to kick in. + if (global.gc) global.gc() - // Useful for manual testing with "node --expose-gc". - // The pending tap assertion may also allow GC to kick in. 
- if (global.gc) global.gc() - }) - }) + return promise }) diff --git a/test/compact-range-test.js b/test/compact-range-test.js index 7de0fe0..ee32e2a 100644 --- a/test/compact-range-test.js +++ b/test/compact-range-test.js @@ -2,70 +2,60 @@ const test = require('tape') const testCommon = require('./common') -const noop = () => {} let db -test('setUp db', function (t) { +test('compactRange() setup', async function (t) { db = testCommon.factory() - db.open(t.end.bind(t)) + return db.open() }) -test('test compactRange() throws if arguments are missing', function (t) { - for (const args of [[], ['foo'], [noop], ['foo', noop]]) { - t.throws(() => db.compactRange(...args), { - name: 'TypeError', - message: "The arguments 'start' and 'end' are required" - }) +test('compactRange() throws if arguments are missing', async function (t) { + t.plan(2 * 2) + + for (const args of [[], ['foo']]) { + try { + await db.compactRange(...args) + } catch (err) { + t.is(err.name, 'TypeError') + t.is(err.message, "The arguments 'start' and 'end' are required") + } } - t.end() }) -test('test compactRange() frees disk space after key deletion', function (t) { +test('compactRange() frees disk space after key deletion', async function (t) { const key1 = '000000' const key2 = '000001' const val1 = Buffer.allocUnsafe(64).fill(1) const val2 = Buffer.allocUnsafe(64).fill(1) - db.batch().put(key1, val1).put(key2, val2).write(function (err) { - t.ifError(err, 'no batch put error') - - db.compactRange(key1, key2, function (err) { - t.ifError(err, 'no compactRange1 error') - - db.approximateSize('0', 'z', function (err, sizeAfterPuts) { - t.error(err, 'no approximateSize1 error') + await db.batch().put(key1, val1).put(key2, val2).write() + await db.compactRange(key1, key2) - db.batch().del(key1).del(key2).write(function (err) { - t.ifError(err, 'no batch del error') + const sizeAfterPuts = await db.approximateSize('0', 'z') - db.compactRange(key1, key2, function (err) { - t.ifError(err, 'no compactRange2 error') + await db.batch().del(key1).del(key2).write() + await db.compactRange(key1, key2) - db.approximateSize('0', 'z', function (err, sizeAfterCompact) { - t.error(err, 'no approximateSize2 error') - t.ok(sizeAfterCompact < sizeAfterPuts) - t.end() - }) - }) - }) - }) - }) - }) + const sizeAfterCompact = await db.approximateSize('0', 'z') + t.ok(sizeAfterCompact < sizeAfterPuts) }) -test('tearDown', function (t) { - db.close(t.end.bind(t)) +test('compactRange() teardown', async function (t) { + return db.close() }) -test('test compactRange() yields error if db is closed', function (t) { - db.compactRange('foo', 'foo', function (err) { - t.is(err && err.code, 'LEVEL_DATABASE_NOT_OPEN') - t.end() - }) +test('compactRange() yields error if db is closed', async function (t) { + t.plan(1) + + try { + await db.compactRange('foo', 'foo') + } catch (err) { + t.is(err.code, 'LEVEL_DATABASE_NOT_OPEN') + } }) -test('test compactRange() is deferred', async function (t) { +test('compactRange() is deferred', async function (t) { const opening = db.open().then(() => 'opening') const deferred = db.compactRange('a', 'b').then(() => 'deferred') t.is(await Promise.race([opening, deferred]), 'opening') @@ -74,7 +64,7 @@ test('test compactRange() is deferred', async function (t) { }) // NOTE: copied from encoding-down -test('encodes start and end of compactRange()', async function (t) { +test('compactRange() encodes start and end', async function (t) { const calls = [] const keyEncoding = { name: 'test', @@ -93,7 +83,7 @@ test('encodes start 
and end of compactRange()', async function (t) { }) // NOTE: adapted from encoding-down -test('encodes start and end of compactRange() with custom encoding', async function (t) { +test('compactRange() encodes start and end with custom encoding', async function (t) { const calls = [] const keyEncoding = { name: 'test', diff --git a/test/compression-test.js b/test/compression-test.js index da1f73c..ba38a17 100644 --- a/test/compression-test.js +++ b/test/compression-test.js @@ -1,8 +1,6 @@ 'use strict' -const each = require('async-each') const du = require('du') -const delayed = require('delayed') const testCommon = require('./common') const { ClassicLevel } = require('..') const test = require('tape') @@ -15,73 +13,82 @@ const multiples = 10 const dataSize = compressableData.length * multiples const verify = function (location, compression, t) { - du(location, function (err, size) { - t.error(err) - if (compression) { - t.ok(size < dataSize, 'on-disk size (' + size + ') is less than data size (' + dataSize + ')') - } else { - t.ok(size >= dataSize, 'on-disk size (' + size + ') is greater than data size (' + dataSize + ')') - } - t.end() + return new Promise(function (resolve, reject) { + du(location, function (err, size) { + if (err) return reject(err) + + if (compression) { + t.ok(size < dataSize, 'on-disk size (' + size + ') is less than data size (' + dataSize + ')') + } else { + t.ok(size >= dataSize, 'on-disk size (' + size + ') is greater than data size (' + dataSize + ')') + } + + resolve() + }) }) } // close, open, close again.. 'compaction' is also performed on open()s -const cycle = function (db, compression, t, callback) { +const cycle = async function (db, compression) { const location = db.location - db.close(function (err) { - t.error(err) - db = new ClassicLevel(location) - db.open({ errorIfExists: false, compression }, function () { - t.error(err) - db.close(function (err) { - t.error(err) - callback() - }) - }) - }) + await db.close() + db = new ClassicLevel(location) + await db.open({ errorIfExists: false, compression }) + return db.close() } test('compression', function (t) { - t.plan(3) + t.plan(4) - t.test('test data is compressed by default (db.put())', function (t) { + t.test('data is compressed by default (db.put())', async function (t) { const db = testCommon.factory() - db.open(function (err) { - t.error(err) - each( - Array.apply(null, Array(multiples)).map(function (e, i) { - return [i, compressableData] - }), function (args, callback) { - db.put.apply(db, args.concat([callback])) - }, cycle.bind(null, db, true, t, delayed.delayed(verify.bind(null, db.location, true, t), 0.01)) - ) + await db.open() + + const promises = Array.apply(null, Array(multiples)).map(function (e, i) { + return db.put(String(i), compressableData) }) + + await Promise.all(promises) + await cycle(db, true) + await verify(db.location, true, t) }) - t.test('test data is not compressed with compression=false on open() (db.put())', function (t) { + t.test('data is not compressed with compression=false on open() (db.put())', async function (t) { const db = testCommon.factory() - db.open({ compression: false }, function (err) { - t.error(err) - each( - Array.apply(null, Array(multiples)).map(function (e, i) { - return [i, compressableData] - }), function (args, callback) { - db.put.apply(db, args.concat([callback])) - }, cycle.bind(null, db, false, t, delayed.delayed(verify.bind(null, db.location, false, t), 0.01)) - ) + await db.open({ compression: false }) + + const promises = Array.apply(null, 
Array(multiples)).map(function (e, i) { + return db.put(String(i), compressableData) }) + + await Promise.all(promises) + await cycle(db, false) + await verify(db.location, false, t) }) - t.test('test data is compressed by default (db.batch())', function (t) { + t.test('data is compressed by default (db.batch())', async function (t) { const db = testCommon.factory() - db.open(function (err) { - t.error(err) - db.batch( - Array.apply(null, Array(multiples)).map(function (e, i) { - return { type: 'put', key: i, value: compressableData } - }), cycle.bind(null, db, true, t, delayed.delayed(verify.bind(null, db.location, true, t), 0.01)) - ) + await db.open() + + const operations = Array.apply(null, Array(multiples)).map(function (e, i) { + return { type: 'put', key: String(i), value: compressableData } + }) + + await db.batch(operations) + await cycle(db, true) + await verify(db.location, true, t) + }) + + t.test('data is not compressed with compression=false on factory (db.batch())', async function (t) { + const db = testCommon.factory({ compression: false }) + await db.open() + + const operations = Array.apply(null, Array(multiples)).map(function (e, i) { + return { type: 'put', key: String(i), value: compressableData } }) + + await db.batch(operations) + await cycle(db, false) + await verify(db.location, false, t) }) }) diff --git a/test/destroy-test.js b/test/destroy-test.js index eff3413..7d80295 100644 --- a/test/destroy-test.js +++ b/test/destroy-test.js @@ -3,6 +3,7 @@ const test = require('tape') const tempy = require('tempy') const fs = require('fs') +const fsp = require('fs/promises') const path = require('path') const mkfiletree = require('mkfiletree') const readfiletree = require('readfiletree') @@ -10,20 +11,21 @@ const rimraf = require('rimraf') const { ClassicLevel } = require('..') const makeTest = require('./make') -test('test destroy() without location throws', function (t) { - t.throws(ClassicLevel.destroy, { - name: 'TypeError', - message: "The first argument 'location' must be a non-empty string" - }) - t.throws(() => ClassicLevel.destroy(''), { - name: 'TypeError', - message: "The first argument 'location' must be a non-empty string" - }) - t.end() +test('test destroy() without location throws', async function (t) { + t.plan(2 * 2) + + for (const args of [[], ['']]) { + try { + await ClassicLevel.destroy(...args) + } catch (err) { + t.is(err.name, 'TypeError') + t.is(err.message, "The first argument 'location' must be a non-empty string") + } + } }) test('test destroy non-existent directory', function (t) { - t.plan(4) + t.plan(3) const location = tempy.directory() const parent = path.dirname(location) @@ -32,31 +34,29 @@ test('test destroy non-existent directory', function (t) { t.ok(fs.existsSync(parent), 'parent exists before') // Cleanup to avoid conflicts with other tests + // TODO: use promise rimraf(location, { glob: false }, function (err) { t.ifError(err, 'no error from rimraf()') - ClassicLevel.destroy(location, function (err) { - t.error(err, 'no error') - + ClassicLevel.destroy(location).then(function () { // Assert that destroy() didn't inadvertently create the directory. // Or if it did, that it was at least cleaned up afterwards. 
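(Aside, not part of the diff: the contract asserted here and in the tests further below is that `destroy()` removes only files LevelDB owns, and removes the directory itself only when nothing else is left in it. A sketch of that contract, with an assumed scratch directory and error handling omitted:)

```js
// Sketch: what destroy() does and does not remove. `location` is assumed
// to be a temporary directory this process may freely write to.
const { ClassicLevel } = require('classic-level')
const fsp = require('fs/promises')
const path = require('path')

async function demoDestroy (location) {
  const db = new ClassicLevel(location)
  await db.open()
  await db.put('key', 'value')
  await db.close()

  // Plant a foreign file next to the LevelDB files.
  await fsp.writeFile(path.join(location, 'foo'), 'FOO')

  // destroy() deletes LevelDB-owned files (CURRENT, LOCK, LOG,
  // MANIFEST-*, *.ldb, *.log) ...
  await ClassicLevel.destroy(location)

  // ... but leaves foreign files, and therefore the directory, intact.
  console.log(await fsp.readdir(location)) // => ['foo']
}
```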
t.notOk(fs.existsSync(location), 'directory does not exist after') - }) + }, t.fail.bind(t)) }) }) test('test destroy non-existent parent directory', function (t) { - t.plan(3) + t.plan(2) const location = '/1/2/3/4' const parent = path.dirname(location) t.notOk(fs.existsSync(parent), 'parent does not exist before') - ClassicLevel.destroy(location, function (err) { - t.error(err, 'no error') + ClassicLevel.destroy(location).then(function () { t.notOk(fs.existsSync(location), 'directory does not exist after') - }) + }, t.fail.bind(t)) }) test('test destroy non leveldb directory', function (t) { @@ -65,12 +65,11 @@ test('test destroy non leveldb directory', function (t) { bar: { one: 'ONE', two: 'TWO', three: 'THREE' } } + // TODO: use promise and/or simplify this test mkfiletree.makeTemp('destroy-test', tree, function (err, dir) { t.ifError(err, 'no error from makeTemp()') - ClassicLevel.destroy(dir, function (err) { - t.ifError(err, 'no error from destroy()') - + ClassicLevel.destroy(dir).then(function () { readfiletree(dir, function (err, actual) { t.ifError(err, 'no error from readfiletree()') t.deepEqual(actual, tree, 'directory remains untouched') @@ -80,40 +79,26 @@ test('test destroy non leveldb directory', function (t) { t.end() }) }) - }) + }, t.fail.bind(t)) }) }) -makeTest('test destroy() cleans and removes leveldb-only dir', function (db, t, done) { +makeTest('test destroy() cleans and removes leveldb-only dir', async function (db, t) { const location = db.location - db.close(function (err) { - t.ifError(err, 'no error from close()') - ClassicLevel.destroy(location, function (err) { - t.ifError(err, 'no error from destroy()') - t.notOk(fs.existsSync(location), 'directory completely removed') + await db.close() + await ClassicLevel.destroy(location) - done(null, false) - }) - }) + t.notOk(fs.existsSync(location), 'directory completely removed') }) -makeTest('test destroy() cleans and removes only leveldb parts of a dir', function (db, t, done) { +makeTest('test destroy() cleans and removes only leveldb parts of a dir', async function (db, t) { const location = db.location fs.writeFileSync(path.join(location, 'foo'), 'FOO') - db.close(function (err) { - t.ifError(err, 'no error from close()') + await db.close() + await ClassicLevel.destroy(location) - ClassicLevel.destroy(location, function (err) { - t.ifError(err, 'no error from destroy()') - - readfiletree(location, function (err, tree) { - t.ifError(err, 'no error from readfiletree()') - t.deepEqual(tree, { foo: 'FOO' }, 'non-leveldb files left intact') - - done(null, false) - }) - }) - }) + t.same(await fsp.readdir(location), ['foo'], 'non-leveldb files left intact') + t.same(await fsp.readFile(path.join(location, 'foo'), 'utf8'), 'FOO', 'content left intact') }) diff --git a/test/env-cleanup-hook-test.js b/test/env-cleanup-hook-test.js index cf20143..ef9eac1 100644 --- a/test/env-cleanup-hook-test.js +++ b/test/env-cleanup-hook-test.js @@ -18,7 +18,9 @@ function addTest (steps) { test(`cleanup on environment exit (${steps.join(', ')})`, function (t) { t.plan(3) - const child = fork(path.join(__dirname, 'env-cleanup-hook.js'), steps) + const child = fork(path.join(__dirname, 'env-cleanup-hook.js'), steps, { + execArgv: [...process.execArgv, '--unhandled-rejections=strict'] + }) child.on('message', function (m) { t.is(m, steps[steps.length - 1], `got to step: ${m}`) diff --git a/test/env-cleanup-hook.js b/test/env-cleanup-hook.js index ee2d730..cf26a33 100644 --- a/test/env-cleanup-hook.js +++ b/test/env-cleanup-hook.js @@ -1,8 
+1,9 @@ 'use strict' const testCommon = require('./common') +const noop = () => {} -function test (steps) { +async function test (steps) { let step function nextStep () { @@ -13,7 +14,8 @@ function test (steps) { if (nextStep() !== 'create') { // Send a message triggering an environment exit // and indicating at which step we stopped. - return process.send(step) + process.send(step) + return } const db = testCommon.factory() @@ -21,39 +23,33 @@ function test (steps) { if (nextStep() !== 'open') { if (nextStep() === 'open-error') { // If opening fails the cleanup hook should be a noop. - db.open({ createIfMissing: false, errorIfExists: true }, function (err) { - if (!err) throw new Error('Expected an open() error') - }) + db.open({ createIfMissing: false, errorIfExists: true }).then(function () { + throw new Error('Expected an open() error') + }, noop) } return process.send(step) } // Open the db, expected to be closed by the cleanup hook. - db.open(function (err) { - if (err) throw err - - if (nextStep() === 'create-iterator') { - // Create an iterator, expected to be closed by the cleanup hook. - const it = db.iterator() - - if (nextStep() === 'nexting') { - // This async work should finish before the cleanup hook is called. - it.next(function (err) { - if (err) throw err - }) - } - } + await db.open() + + if (nextStep() === 'create-iterator') { + // Create an iterator, expected to be closed by the cleanup hook. + const it = db.iterator() - if (nextStep() === 'close') { - // Close the db, after which the cleanup hook is a noop. - db.close(function (err) { - if (err) throw err - }) + if (nextStep() === 'nexting') { + // This async work should finish before the cleanup hook is called. + it.next() } + } - process.send(step) - }) + if (nextStep() === 'close') { + // Close the db, after which the cleanup hook is a noop. 
+ db.close() + } + + process.send(step) } test(process.argv.slice(2)) diff --git a/test/getproperty-test.js b/test/getproperty-test.js index 9e7b8e5..e7e3981 100644 --- a/test/getproperty-test.js +++ b/test/getproperty-test.js @@ -5,12 +5,12 @@ const testCommon = require('./common') let db -test('setUp db', function (t) { +test('getProperty() setup', async function (t) { db = testCommon.factory() - db.open(t.end.bind(t)) + return db.open() }) -test('test argument-less getProperty() throws', function (t) { +test('argument-less getProperty() throws', function (t) { t.throws(db.getProperty.bind(db), { name: 'TypeError', message: "The first argument 'property' must be a string" @@ -18,7 +18,7 @@ test('test argument-less getProperty() throws', function (t) { t.end() }) -test('test non-string getProperty() throws', function (t) { +test('non-string getProperty() throws', function (t) { t.throws(db.getProperty.bind(db, {}), { name: 'TypeError', message: "The first argument 'property' must be a string" @@ -26,13 +26,13 @@ test('test non-string getProperty() throws', function (t) { t.end() }) -test('test invalid getProperty() returns empty string', function (t) { +test('invalid getProperty() returns empty string', function (t) { t.equal(db.getProperty('foo'), '', 'invalid property') t.equal(db.getProperty('leveldb.foo'), '', 'invalid leveldb.* property') t.end() }) -test('test invalid getProperty("leveldb.num-files-at-levelN") returns numbers', function (t) { +test('invalid getProperty("leveldb.num-files-at-levelN") returns numbers', function (t) { for (let i = 0; i < 7; i++) { t.equal(db.getProperty('leveldb.num-files-at-level' + i), '0', '"leveldb.num-files-at-levelN" === "0"') @@ -40,12 +40,12 @@ test('test invalid getProperty("leveldb.num-files-at-levelN") returns numbers', t.end() }) -test('test invalid getProperty("leveldb.stats")', function (t) { +test('invalid getProperty("leveldb.stats")', function (t) { t.ok(db.getProperty('leveldb.stats').split('\n').length > 3, 'leveldb.stats has > 3 newlines') t.end() }) -test('test invalid getProperty("leveldb.sstables")', function (t) { +test('invalid getProperty("leveldb.sstables")', function (t) { const expected = [0, 1, 2, 3, 4, 5, 6].map(function (l) { return '--- level ' + l + ' ---' }).join('\n') + '\n' @@ -53,8 +53,8 @@ test('test invalid getProperty("leveldb.sstables")', function (t) { t.end() }) -test('tearDown', function (t) { - db.close(t.end.bind(t)) +test('getProperty() teardown', async function (t) { + return db.close() }) test('getProperty() throws if db is closed', function (t) { diff --git a/test/iterator-gc-test.js b/test/iterator-gc-test.js index c7391bf..0f3bb60 100644 --- a/test/iterator-gc-test.js +++ b/test/iterator-gc-test.js @@ -14,50 +14,37 @@ for (let i = 0; i < 1e3; i++) { // When you have a database open with an active iterator, but no references to // the db, V8 will GC the database and you'll get an failed assert from LevelDB. -test('db without ref does not get GCed while iterating', function (t) { - t.plan(6) - +test('db without ref does not get GCed while iterating', async function (t) { let db = testCommon.factory() - db.open(function (err) { - t.ifError(err, 'no open error') - - // Insert test data - db.batch(sourceData.slice(), function (err) { - t.ifError(err, 'no batch error') - - // Set highWaterMarkBytes to 0 so that we don't preemptively fetch. 
- const it = db.iterator({ highWaterMarkBytes: 0 }) - - // Remove reference - db = null - - if (global.gc) { - // This is the reliable way to trigger GC (and the bug if it exists). - // Useful for manual testing with "node --expose-gc". - global.gc() - iterate(it) - } else { - // But a timeout usually also allows GC to kick in. If not, the time - // between iterator ticks might. That's when "highWaterMarkBytes: 0" helps. - setTimeout(iterate.bind(null, it), 1000) - } - }) - }) + await db.open() + + // Insert test data + await db.batch(sourceData.slice()) - function iterate (it) { - // No reference to db here, could be GCed. It shouldn't.. - it.all(function (err, entries) { - t.ifError(err, 'no iterator error') - t.is(entries.length, sourceData.length, 'got data') + // Set highWaterMarkBytes to 0 so that we don't preemptively fetch. + const it = db.iterator({ highWaterMarkBytes: 0 }) - // Because we also have a reference on the iterator. That's the fix. - t.ok(it.db, 'abstract iterator has reference to db') + // Remove reference + db = null - // Which as luck would have it, also allows us to properly end this test. - it.db.close(function (err) { - t.ifError(err, 'no close error') - }) - }) + if (global.gc) { + // This is the reliable way to trigger GC (and the bug if it exists). + // Useful for manual testing with "node --expose-gc". + global.gc() + } else { + // But a timeout usually also allows GC to kick in. If not, the time + // between iterator ticks might. That's when "highWaterMarkBytes: 0" helps. + await new Promise(resolve => setTimeout(resolve, 1e3)) } + + // No reference to db here, could be GCed. It shouldn't.. + const entries = await it.all() + t.is(entries.length, sourceData.length, 'got data') + + // Because we also have a reference on the iterator. That's the fix. + t.ok(it.db, 'abstract iterator has reference to db') + + // Which as luck would have it, also allows us to properly end this test. + return it.db.close() }) diff --git a/test/iterator-hwm-test.js b/test/iterator-hwm-test.js index 6ca2c46..8571c1d 100644 --- a/test/iterator-hwm-test.js +++ b/test/iterator-hwm-test.js @@ -7,6 +7,7 @@ let db test('highWaterMarkBytes setup', async function (t) { db = testCommon.factory() + await db.open() // Write 8 bytes return db.batch().put('a', '0').put('b', '1').put('c', '2').put('d', '3').write() diff --git a/test/iterator-recursion-test.js b/test/iterator-recursion-test.js index 79f535f..c551203 100644 --- a/test/iterator-recursion-test.js +++ b/test/iterator-recursion-test.js @@ -5,8 +5,6 @@ const testCommon = require('./common') const fork = require('child_process').fork const path = require('path') -let db - const sourceData = (function () { const d = [] let i = 0 @@ -22,65 +20,43 @@ const sourceData = (function () { return d }()) -// TODO: fix this test. It asserted that we didn't segfault if user code had an -// infinite loop leading to stack exhaustion, which caused a node::FatalException() +// NOTE: this is an old leveldown test that asserts that we don't segfault if user code +// has an infinite loop leading to stack exhaustion, which caused a node::FatalException() // call in our Iterator to segfault. This was fixed in 2014 (commit 85e6a38). -// -// Today (2020), we see occasional failures in CI again. We no longer call -// node::FatalException() so there's a new reason. 
-test.skip('try to create an iterator with a blown stack', function (t) { - for (let i = 0; i < 100; i++) { - t.test(`try to create an iterator with a blown stack (${i})`, function (t) { - t.plan(3) - - // Reducing the stack size down from the default 984 for the child node - // process makes it easier to trigger the bug condition. But making it too low - // causes the child process to die for other reasons. - const opts = { execArgv: ['--stack-size=128'] } - const child = fork(path.join(__dirname, 'stack-blower.js'), ['run'], opts) - - child.on('message', function (m) { - t.ok(true, m) - child.disconnect() - }) - - child.on('exit', function (code, sig) { - t.is(code, 0, 'child exited normally') - t.is(sig, null, 'not terminated due to signal') - }) - }) - } - - t.end() -}) +test('try to create an iterator with a blown stack', function (t) { + t.plan(3) + + // Reducing the stack size down from the default 984 for the child node + // process makes it easier to trigger the bug condition. But making it too low + // causes the child process to die for other reasons. + const opts = { execArgv: ['--stack-size=256'] } + const child = fork(path.join(__dirname, 'stack-blower.js'), ['run'], opts) + + child.on('message', function (m) { + t.ok(true, m) + child.disconnect() + }) -test('setUp db', function (t) { - db = testCommon.factory() - db.open(function (err) { - t.error(err) - db.batch(sourceData, t.end.bind(t)) + child.on('exit', function (code, sig) { + t.is(code, 0, 'child exited normally') + t.is(sig, null, 'not terminated due to signal') }) }) -test('iterate over a large iterator with a large watermark', function (t) { +test('iterate over a large iterator with a large watermark', async function (t) { + const db = testCommon.factory() + + await db.open() + await db.batch(sourceData) + const iterator = db.iterator({ highWaterMarkBytes: 10000000 }) - const read = function () { - iterator.next(function (err, key, value) { - if (err) throw err - if (key === undefined && value === undefined) { - t.end() - } else { - read() - } - }) + while (true) { + const entry = await iterator.next() + if (entry === undefined) break } - read() -}) - -test('tearDown', function (t) { - db.close(t.end.bind(t)) + return db.close() }) diff --git a/test/iterator-starvation-test.js b/test/iterator-starvation-test.js index 89bf4ac..33d601d 100644 --- a/test/iterator-starvation-test.js +++ b/test/iterator-starvation-test.js @@ -4,9 +4,11 @@ const test = require('tape') const testCommon = require('./common') const sourceData = [] -// For this test the number of records in the db must be a multiple of -// the hardcoded fast-future limit (1000) or a cache size limit in C++. -for (let i = 0; i < 1e4; i++) { +// For this test the number of entries in the db must be a multiple of +// the hardcoded limit in iterator.js (1000). 
+const limit = 1000 + +for (let i = 0; i < limit * 10; i++) { sourceData.push({ type: 'put', key: i.toString(), @@ -14,107 +16,91 @@ for (let i = 0; i < 1e4; i++) { }) } -test('iterator does not starve event loop', function (t) { - t.plan(6) +test('iterator does not starve event loop', async function (t) { + t.plan(2) const db = testCommon.factory() - db.open(function (err) { - t.ifError(err, 'no open error') - - // Insert test data - db.batch(sourceData.slice(), function (err) { - t.ifError(err, 'no batch error') - - // Set a high highWaterMarkBytes to fill up the cache entirely - const it = db.iterator({ highWaterMarkBytes: Math.pow(1024, 3) }) - - let breaths = 0 - let entries = 0 - let scheduled = false - - // Iterate continuously while also scheduling work with setImmediate(), - // which should be given a chance to run because we limit the tick depth. - const next = function () { - it.next(function (err, key, value) { - if (err || (key === undefined && value === undefined)) { - t.ifError(err, 'no next error') - t.is(entries, sourceData.length, 'got all data') - t.is(breaths, sourceData.length / 1000, 'breathed while iterating') - - return db.close(function (err) { - t.ifError(err, 'no close error') - }) - } - - entries++ - - if (!scheduled) { - scheduled = true - setImmediate(function () { - breaths++ - scheduled = false - }) - } - - next() - }) - } - - next() - }) - }) + // Insert test data + await db.open() + await db.batch(sourceData.slice()) + + // Set a high highWaterMarkBytes to fill up the cache entirely + const it = db.iterator({ highWaterMarkBytes: Math.pow(1024, 3) }) + + let breaths = 0 + let entries = 0 + let scheduled = false + + // Iterate continuously while also scheduling work with setImmediate(), + // which should be given a chance to run because we limit the tick depth. 
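(Aside: the "breathe" bookkeeping below works because `next()` only yields to the event loop when the JS-side cache is empty, that is, once per `limit` entries. A sketch of the same measurement in isolation, assuming the hardcoded 1000-entry cache limit from iterator.js:)

```js
// Sketch: counts how often the event loop gets to run immediates while
// an iterator drains. With a huge high water mark the JS-side cache
// refills in chunks of the hardcoded 1000-entry limit, so roughly
// entries / 1000 setImmediate callbacks should have run.
async function measureBreaths (db) {
  const it = db.iterator({ highWaterMarkBytes: Math.pow(1024, 3) })

  let breaths = 0
  let entries = 0
  let scheduled = false
  let entry

  while ((entry = await it.next()) !== undefined) {
    entries++

    if (!scheduled) {
      scheduled = true
      setImmediate(() => {
        breaths++
        scheduled = false
      })
    }
  }

  await it.close()
  return { entries, breaths }
}
```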
+ const next = async function () { + const entry = await it.next() + + if (entry === undefined) { + t.is(entries, sourceData.length, 'got all data') + t.is(breaths, sourceData.length / limit, 'breathed while iterating') + + return db.close() + } + + entries++ + + if (!scheduled) { + scheduled = true + setImmediate(function () { + breaths++ + scheduled = false + }) + } + + return next() + } + + return next() }) -test('iterator with seeks does not starve event loop', function (t) { - t.plan(6) +test('iterator with seeks does not starve event loop', async function (t) { + t.plan(2) const db = testCommon.factory() - db.open(function (err) { - t.ifError(err, 'no open error') + await db.open() + await db.batch(sourceData.slice()) - db.batch(sourceData.slice(), function (err) { - t.ifError(err, 'no batch error') + const it = db.iterator({ highWaterMarkBytes: Math.pow(1024, 3), limit: sourceData.length }) - const it = db.iterator({ highWaterMarkBytes: Math.pow(1024, 3), limit: sourceData.length }) + let breaths = 0 + let entries = 0 + let scheduled = false - let breaths = 0 - let entries = 0 - let scheduled = false + const next = async function () { + const entry = await it.next() - const next = function () { - it.next(function (err, key, value) { - if (err || (key === undefined && value === undefined)) { - t.ifError(err, 'no next error') - t.is(entries, sourceData.length, 'got all data') - t.is(breaths, sourceData.length - 1, 'breathed while iterating') + if (entry === undefined) { + t.is(entries, sourceData.length, 'got all data') + t.is(breaths, sourceData.length - 1, 'breathed while iterating') - return db.close(function (err) { - t.ifError(err, 'no close error') - }) - } + return db.close() + } - entries++ + entries++ - if (!scheduled) { - // Seeking clears the cache, which should only have a positive - // effect because it means the cache must be refilled, which - // again gives us time to breathe. This is a smoke test, really. - it.seek(sourceData[0].key) + if (!scheduled) { + // Seeking clears the cache, which should only have a positive + // effect because it means the cache must be refilled, which + // again gives us time to breathe. This is a smoke test, really. 
+ it.seek(sourceData[0].key) - scheduled = true - setImmediate(function () { - breaths++ - scheduled = false - }) - } + scheduled = true + setImmediate(function () { + breaths++ + scheduled = false + }) + } - next() - }) - } + return next() + } - next() - }) - }) + return next() }) diff --git a/test/iterator-test.js b/test/iterator-test.js index 50706d0..fd2e2b9 100644 --- a/test/iterator-test.js +++ b/test/iterator-test.js @@ -1,41 +1,73 @@ 'use strict' -const make = require('./make') - -make('iterator optimized for seek', function (db, t, done) { - const batch = db.batch() - batch.put('a', 1) - batch.put('b', 1) - batch.put('c', 1) - batch.put('d', 1) - batch.put('e', 1) - batch.put('f', 1) - batch.put('g', 1) - batch.write(function (err) { - const ite = db.iterator() - t.ifError(err, 'no error from batch()') - ite.next(function (err, key, value) { - t.ifError(err, 'no error from next()') - t.equal(key.toString(), 'a', 'key matches') - t.equal(ite.cached, 0, 'no cache') - ite.next(function (err, key, value) { - t.ifError(err, 'no error from next()') - t.equal(key.toString(), 'b', 'key matches') - t.ok(ite.cached > 0, 'has cached items') - ite.seek('d') - t.is(ite.cached, 0, 'cache is emptied') - ite.next(function (err, key, value) { - t.ifError(err, 'no error from next()') - t.equal(key.toString(), 'd', 'key matches') - t.equal(ite.cached, 0, 'no cache') - ite.next(function (err, key, value) { - t.ifError(err, 'no error from next()') - t.equal(key.toString(), 'e', 'key matches') - t.ok(ite.cached > 0, 'has cached items') - ite.close(done) - }) - }) - }) - }) - }) +const test = require('tape') +const testCommon = require('./common') + +test('iterator optimized for seek', async function (t) { + const db = testCommon.factory() + + await db.open() + await db.batch() + .put('a', 'value') + .put('b', 'value') + .put('c', 'value') + .put('d', 'value') + .put('e', 'value') + .put('f', 'value') + .put('g', 'value') + .write() + + const ite = db.iterator() + + t.same(await ite.next(), ['a', 'value'], 'entry matches') + t.is(ite.cached, 0, 'no cache') + + t.same(await ite.next(), ['b', 'value'], 'entry matches') + t.ok(ite.cached > 0, 'has cached items') + + ite.seek('d') + t.is(ite.cached, 0, 'cache is emptied') + + t.same(await ite.next(), ['d', 'value'], 'entry matches') + t.is(ite.cached, 0, 'no cache') + + t.same(await ite.next(), ['e', 'value'], 'entry matches') + t.ok(ite.cached > 0, 'has cached items') + + await ite.close() + return db.close() }) + +// TODO: move to abstract-level +for (const slice of [false, true]) { + test(`nextv() after next() respects cache (slice=${slice})`, async function (t) { + const db = testCommon.factory() + + await db.open() + await db.batch() + .put('a', 'value') + .put('b', 'value') + .put('c', 'value') + .put('d', 'value') + .write() + + const it = db.iterator() + + // Two calls are needed to populate the cache + t.same(await it.next(), ['a', 'value'], 'entry a ok') + t.same(await it.next(), ['b', 'value'], 'entry b ok') + t.is(it.cached, 2) + + if (slice) { + t.same(await it.nextv(1), [['c', 'value']], 'entries ok (1)') + t.same(await it.nextv(1), [['d', 'value']], 'entries ok (2)') + t.same(await it.nextv(1), [], 'empty') + } else { + t.same(await it.nextv(10), [['c', 'value'], ['d', 'value']], 'entries ok') + t.same(await it.nextv(10), [], 'empty') + } + + await it.close() + return db.close() + }) +} diff --git a/test/leak-tester-batch.js b/test/leak-tester-batch.js index 4f7af33..212f0c3 100644 --- a/test/leak-tester-batch.js +++ 
b/test/leak-tester-batch.js @@ -5,30 +5,29 @@ const CHAINED = false const testCommon = require('./common') const crypto = require('crypto') -const assert = require('assert') let writeCount = 0 let rssBase -function print () { - if (writeCount % 100 === 0) { +function tick () { + if (++writeCount % 100 === 0) { if (typeof global.gc !== 'undefined') global.gc() console.log( 'writeCount =', writeCount, ', rss =', Math.round(process.memoryUsage().rss / rssBase * 100) + '%', - Math.round(process.memoryUsage().rss / 1024 / 1024) + 'M', - JSON.stringify([0, 1, 2, 3, 4, 5, 6].map(function (l) { - return db.getProperty('leveldb.num-files-at-level' + l) - })) + Math.round(process.memoryUsage().rss / 1024 / 1024) + 'M' ) } } const run = CHAINED - ? function () { + ? async function () { const batch = db.batch() + // TODO: a good amount of memory usage (and growth) comes from this code and not the db + // itself, which makes the output difficult to interpret. See if we can use fixed data + // without changing the meaning of the test. Same below (for non-chained). for (let i = 0; i < 100; i++) { let key = 'long key to test memory usage ' + String(Math.floor(Math.random() * 10000000)) if (BUFFERS) key = Buffer.from(key) @@ -37,15 +36,10 @@ const run = CHAINED batch.put(key, value) } - batch.write(function (err) { - assert(!err) - process.nextTick(run) - }) - - writeCount++ - print() + tick() + return batch.write() } - : function () { + : async function () { const batch = [] for (let i = 0; i < 100; i++) { @@ -56,18 +50,13 @@ const run = CHAINED batch.push({ type: 'put', key, value }) } - db.batch(batch, function (err) { - assert(!err) - process.nextTick(run) - }) - - writeCount++ - print() + tick() + return db.batch(batch) } const db = testCommon.factory() -db.open(function () { +db.open().then(async function () { rssBase = process.memoryUsage().rss - run() + while (true) await run() }) diff --git a/test/leak-tester-iterator.js b/test/leak-tester-iterator.js index 50ca019..5d0a602 100644 --- a/test/leak-tester-iterator.js +++ b/test/leak-tester-iterator.js @@ -9,39 +9,36 @@ if (!global.gc) { console.error('To force GC, run with "node --expose-gc"') } -function run () { - const it = db.iterator() +async function run () { + while (true) { + const it = db.iterator() - it.next(function (err) { - if (err) throw err + await it.next() + await it.close() - it.close(function (err) { - if (err) throw err + if (!rssBase) { + rssBase = process.memoryUsage().rss + } - if (!rssBase) { - rssBase = process.memoryUsage().rss - } + if (++count % 1000 === 0) { + if (global.gc) global.gc() - if (++count % 1000 === 0) { - if (global.gc) global.gc() + const rss = process.memoryUsage().rss + const percent = Math.round((rss / rssBase) * 100) + const mb = Math.round(rss / 1024 / 1024) - const rss = process.memoryUsage().rss - const percent = Math.round((rss / rssBase) * 100) - const mb = Math.round(rss / 1024 / 1024) - - console.log('count = %d, rss = %d% %dM', count, percent, mb) - } - - run() - }) - }) + console.log('count = %d, rss = %d% %dM', count, percent, mb) + } + } } -db.open(function (err) { - if (err) throw err +async function main () { + await db.open() + await db.put('key', 'value') + await run() +} - db.put('key', 'value', function (err) { - if (err) throw err - run() - }) +main().catch(function (err) { + console.error(err) + process.exit(1) }) diff --git a/test/leak-tester.js b/test/leak-tester.js index 1308d01..7974e73 100644 --- a/test/leak-tester.js +++ b/test/leak-tester.js @@ -7,43 +7,45 @@ const crypto = 
require('crypto') let putCount = 0 let getCount = 0 -let rssBase +let iterations = 0 -function run () { - let key = 'long key to test memory usage ' + String(Math.floor(Math.random() * 10000000)) +async function main () { + const db = testCommon.factory() + await db.open() - if (BUFFERS) key = Buffer.from(key) + const rssBase = process.memoryUsage().rss - db.get(key, function (err, value) { - getCount++ + while (true) { + let testKey = 'long key to test memory usage ' + String(Math.floor(Math.random() * 10000000)) + let testValue = crypto.randomBytes(1024) - if (err) { - let putValue = crypto.randomBytes(1024) - if (!BUFFERS) putValue = putValue.toString('hex') + if (BUFFERS) { + testKey = Buffer.from(testKey, 'utf8') + } else { + testValue = testValue.toString('hex') + } + + const value = await db.get(testKey, { fillCache: false }) - return db.put(key, putValue, function () { - putCount++ - process.nextTick(run) - }) + if (value === undefined) { + await db.put(testKey, testValue) + putCount++ + } else { + getCount++ } - process.nextTick(run) - }) - - if (getCount % 1000 === 0) { - if (typeof global.gc !== 'undefined') global.gc() - console.log('getCount =', getCount, ', putCount = ', putCount, ', rss =', - Math.round(process.memoryUsage().rss / rssBase * 100) + '%', - Math.round(process.memoryUsage().rss / 1024 / 1024) + 'M', - JSON.stringify([0, 1, 2, 3, 4, 5, 6].map(function (l) { - return db.getProperty('leveldb.num-files-at-level' + l) - }))) + if (iterations++ % 5e3 === 0) { + if (typeof global.gc !== 'undefined') global.gc() + + console.log('getCount =', getCount, ', putCount = ', putCount, ', rss =', + Math.round(process.memoryUsage().rss / rssBase * 100) + '%', + Math.round(process.memoryUsage().rss / 1024 / 1024) + 'M' + ) + } } } -const db = testCommon.factory() - -db.open(function () { - rssBase = process.memoryUsage().rss - run() +main().catch(function (err) { + console.error(err) + process.exit(1) }) diff --git a/test/lock-test.js b/test/lock-test.js index cbc0aeb..6f7d632 100644 --- a/test/lock-test.js +++ b/test/lock-test.js @@ -4,6 +4,7 @@ const test = require('tape') const tempy = require('tempy') const fork = require('child_process').fork const path = require('path') +const { once } = require('events') const { ClassicLevel } = require('..') test('lock held by same process', async function (t) { @@ -24,29 +25,26 @@ test('lock held by same process', async function (t) { return db1.close() }) -test('lock held by other process', function (t) { - t.plan(6) +test('lock held by other process', async function (t) { + t.plan(4) const location = tempy.directory() const db = new ClassicLevel(location) + await db.open() - db.open(function (err) { - t.ifError(err, 'no open error') + const child = fork(path.join(__dirname, 'lock.js'), [location]) - const child = fork(path.join(__dirname, 'lock.js'), [location]) + child.on('message', function (err) { + t.is(err.code, 'LEVEL_DATABASE_NOT_OPEN', 'second process failed to open') + t.is(err.cause.code, 'LEVEL_LOCKED', 'second process got lock error') - child.on('message', function (err) { - t.is(err.code, 'LEVEL_DATABASE_NOT_OPEN', 'second process failed to open') - t.is(err.cause.code, 'LEVEL_LOCKED', 'second process got lock error') + child.disconnect() + }) - child.disconnect() - }) + const [code, sig] = await once(child, 'exit') - child.on('exit', function (code, sig) { - t.is(code, 0, 'child exited normally') - t.is(sig, null, 'not terminated due to signal') + t.is(code, 0, 'child exited normally') + t.is(sig, null, 'not terminated 
due to signal') - db.close(t.ifError.bind(t)) - }) - }) + return db.close() }) diff --git a/test/lock.js b/test/lock.js index d74f6ac..bd5b89e 100644 --- a/test/lock.js +++ b/test/lock.js @@ -5,6 +5,8 @@ const { ClassicLevel } = require('..') const location = process.argv[2] const db = new ClassicLevel(location) -db.open(function (err) { +db.open().then(function () { + process.send(null) +}, function (err) { process.send(err) }) diff --git a/test/make.js b/test/make.js index 2d8ac35..0a34267 100644 --- a/test/make.js +++ b/test/make.js @@ -4,32 +4,17 @@ const test = require('tape') const testCommon = require('./common') function makeTest (name, testFn) { - test(name, function (t) { + test(name, async function (t) { const db = testCommon.factory() - const done = function (err, close) { - t.ifError(err, 'no error from done()') - if (close === false) { - t.end() - return - } + await db.open() + await db.batch([ + { type: 'put', key: 'a', value: '1' }, + { type: 'put', key: 'b', value: '2' }, + { type: 'put', key: 'c', value: '3' } + ]) - db.close(function (err) { - t.ifError(err, 'no error from close()') - t.end() - }) - } - db.open(function (err) { - t.ifError(err, 'no error from open()') - db.batch([ - { type: 'put', key: 'one', value: '1' }, - { type: 'put', key: 'two', value: '2' }, - { type: 'put', key: 'three', value: '3' } - ], function (err) { - t.ifError(err, 'no error from batch()') - testFn(db, t, done) - }) - }) + return testFn(db, t) }) } diff --git a/test/repair-test.js b/test/repair-test.js index 68b892c..4e91b3c 100644 --- a/test/repair-test.js +++ b/test/repair-test.js @@ -1,52 +1,48 @@ 'use strict' const test = require('tape') -const fs = require('fs') +const fsp = require('fs/promises') const { ClassicLevel } = require('..') const makeTest = require('./make') -test('test repair() without location throws', function (t) { - t.throws(ClassicLevel.repair, { - name: 'TypeError', - message: "The first argument 'location' must be a non-empty string" - }) - t.throws(() => ClassicLevel.repair(''), { - name: 'TypeError', - message: "The first argument 'location' must be a non-empty string" - }) - t.end() +test('test repair() without location throws', async function (t) { + t.plan(2 * 2) + + for (const args of [[], ['']]) { + try { + await ClassicLevel.repair(...args) + } catch (err) { + t.is(err.name, 'TypeError') + t.is(err.message, "The first argument 'location' must be a non-empty string") + } + } }) -test('test repair non-existent directory returns error', function (t) { - ClassicLevel.repair('/1/2/3/4', function (err) { +test('test repair non-existent directory returns error', async function (t) { + t.plan(1) + + try { + await ClassicLevel.repair('/1/2/3/4') + } catch (err) { if (process.platform !== 'win32') { - t.ok(/no such file or directory/i.test(err), 'error on callback') + t.ok(/no such file or directory/i.test(err), 'error') } else { - t.ok(/IO error/i.test(err), 'error on callback') + t.ok(/IO error/i.test(err), 'error') } - t.end() - }) + } }) // a proxy indicator that RepairDB is being called and doing its thing -makeTest('test repair() compacts', function (db, t, done) { - const location = db.location - - db.close(function (err) { - t.ifError(err, 'no error from close()') - - let files = fs.readdirSync(location) - t.ok(files.some(function (f) { return (/\.log$/).test(f) }), 'directory contains log file(s)') - t.notOk(files.some(function (f) { return (/\.ldb$/).test(f) }), 'directory does not contain ldb file(s)') +makeTest('test repair() compacts', async function 
(db, t) { + await db.close() - ClassicLevel.repair(location, function (err) { - t.ifError(err, 'no error from repair()') + let files = await fsp.readdir(db.location) + t.ok(files.some(function (f) { return (/\.log$/).test(f) }), 'directory contains log file(s)') + t.notOk(files.some(function (f) { return (/\.ldb$/).test(f) }), 'directory does not contain ldb file(s)') - files = fs.readdirSync(location) - t.notOk(files.some(function (f) { return (/\.log$/).test(f) }), 'directory does not contain log file(s)') - t.ok(files.some(function (f) { return (/\.ldb$/).test(f) }), 'directory contains ldb file(s)') + await ClassicLevel.repair(db.location) - done(null, false) - }) - }) + files = await fsp.readdir(db.location) + t.notOk(files.some(function (f) { return (/\.log$/).test(f) }), 'directory does not contain log file(s)') + t.ok(files.some(function (f) { return (/\.ldb$/).test(f) }), 'directory contains ldb file(s)') }) diff --git a/test/segfault-test.js b/test/segfault-test.js index dce70a4..1504119 100644 --- a/test/segfault-test.js +++ b/test/segfault-test.js @@ -7,83 +7,73 @@ const operations = [] // The db must wait for pending operations to finish before closing. This to // prevent segfaults and in the case of compactRange() to prevent hanging. See // https://github.com/Level/leveldown/issues/157 and 32. -function testPending (name, expectedCount, fn) { +function testPending (name, fn) { operations.push(fn) - test(`close() waits for pending ${name}`, function (t) { + test(`close() waits for pending ${name}`, async function (t) { const db = testCommon.factory() - let count = 0 + let finished = false - db.open(function (err) { - t.ifError(err, 'no error from open()') + await db.open() + await db.put('key', 'value') - db.put('key', 'value', function (err) { - t.ifError(err, 'no error from put()') - - fn(db, function (err) { - count++ - t.ifError(err, 'no error from operation') - }) + fn(db).then(function () { + finished = true + }) - db.close(function (err) { - t.ifError(err, 'no error from close()') - t.is(count, expectedCount, 'operation(s) finished before close') - t.end() - }) - }) + return db.close().then(function () { + t.is(finished, true, 'operation(s) finished before close') }) }) } -testPending('get()', 1, function (db, next) { - db.get('key', next) +testPending('get()', async function (db) { + return db.get('key') }) -testPending('put()', 1, function (db, next) { - db.put('key2', 'value', next) +testPending('put()', async function (db) { + return db.put('key2', 'value') }) -testPending('put() with { sync }', 1, function (db, next) { +testPending('put() with { sync }', async function (db) { // The sync option makes the operation slower and thus more likely to // cause a segfault (if closing were to happen during the operation). 
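(Aside: `{ sync: true }` maps to LevelDB's `WriteOptions::sync`, which flushes the write-ahead log to disk before the operation resolves. A sketch of the trade-off:)

```js
// Sketch: durability vs speed of the sync write option. `db` is assumed
// to be an open ClassicLevel instance.
async function putBoth (db) {
  // Fast path: the write may still sit in OS buffers when this resolves;
  // a machine crash could lose it (a mere process crash could not).
  await db.put('a', 'value')

  // Slow path: LevelDB fsyncs its log first, so once this resolves the
  // write survives a machine crash too. That extra latency is what the
  // segfault tests here exploit to widen the close() race window.
  await db.put('b', 'value', { sync: true })
}
```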
- db.put('key2', 'value', { sync: true }, next) + return db.put('key2', 'value', { sync: true }) }) -testPending('del()', 1, function (db, next) { - db.del('key', next) +testPending('del()', async function (db) { + return db.del('key') }) -testPending('del() with { sync }', 1, function (db, next) { - db.del('key', { sync: true }, next) +testPending('del() with { sync }', async function (db) { + return db.del('key', { sync: true }) }) -testPending('batch([])', 1, function (db, next) { - db.batch([{ type: 'del', key: 'key' }], next) +testPending('batch([])', async function (db) { + return db.batch([{ type: 'del', key: 'key' }]) }) -testPending('batch([]) with { sync }', 1, function (db, next) { - db.batch([{ type: 'del', key: 'key' }], { sync: true }, next) +testPending('batch([]) with { sync }', async function (db) { + return db.batch([{ type: 'del', key: 'key' }], { sync: true }) }) -testPending('batch()', 1, function (db, next) { - db.batch().del('key').write(next) +testPending('batch()', async function (db) { + return db.batch().del('key').write() }) -testPending('batch() with { sync }', 1, function (db, next) { - db.batch().del('key').write({ sync: true }, next) +testPending('batch() with { sync }', async function (db) { + return db.batch().del('key').write({ sync: true }) }) -testPending('approximateSize()', 1, function (db, next) { - db.approximateSize('a', 'z', next) +testPending('approximateSize()', async function (db) { + return db.approximateSize('a', 'z') }) -testPending('compactRange()', 1, function (db, next) { - db.compactRange('a', 'z', next) +testPending('compactRange()', async function (db) { + return db.compactRange('a', 'z') }) // Test multiple pending operations, using all of the above. -testPending('operations', operations.length, function (db, next) { - for (const fn of operations.slice(0, -1)) { - fn(db, next) - } +testPending('operations', async function (db) { + return Promise.all(operations.slice(0, -1).map(fn => fn(db))) }) diff --git a/test/stack-blower.js b/test/stack-blower.js index f1ff486..3e85181 100644 --- a/test/stack-blower.js +++ b/test/stack-blower.js @@ -4,7 +4,7 @@ * This test uses infinite recursion to test iterator creation with limited * stack space. In order to isolate the test harness, we run in a different * process. This is achieved through a fork() command in - * iterator-recursion-test.js. To prevent tap from trying to run this test + * iterator-recursion-test.js. To prevent tape from trying to run this test * directly, we check for a command-line argument. */ const testCommon = require('./common') @@ -13,17 +13,20 @@ if (process.argv[2] === 'run') { const db = testCommon.factory() let depth = 0 - db.open(function () { - function recurse () { - db.iterator({ gte: '0' }) - depth++ - recurse() - } + db.open().then(function () { + // Escape promise chain + process.nextTick(function () { + function recurse () { + db.iterator({ gte: '0' }) + depth++ + recurse() + } - try { - recurse() - } catch (e) { - process.send('Catchable error at depth ' + depth) - } + try { + recurse() + } catch (e) { + process.send('Catchable error at depth ' + depth) + } + }) }) }
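(Closing aside, not part of the diff: the conversions above all land on the same promise-first shape: open explicitly, await each operation, and return `db.close()` so tape can await teardown. A minimal sketch of that shape, assuming the suite's `./common` factory; names are illustrative only:)

```js
// Sketch: the promise-first test style this diff converges on.
const test = require('tape')
const testCommon = require('./common')

test('example of the converted style', async function (t) {
  const db = testCommon.factory()

  // Open explicitly instead of inside a callback.
  await db.open()
  await db.put('a', '1')

  t.is(await db.get('a'), '1', 'roundtrip ok')

  // Returning the close() promise lets tape await teardown, which is
  // what keeps the close-ordering guarantees testable at all.
  return db.close()
})
```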