diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 2d3490a0da..02f52237e2 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -14,16 +14,7 @@ jobs: steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v4 - - # ref: https://github.com/pre-commit/action - - uses: pre-commit/action@v3.0.0 - - name: Help message if pre-commit fail - if: ${{ failure() }} + - name: "Run pre-commit" run: | - echo "You can install pre-commit hooks to automatically run formatting" - echo "on each commit with:" - echo " pre-commit install" - echo "or you can run by hand on staged files with" - echo " pre-commit run" - echo "or after-the-fact on already committed files with" - echo " pre-commit run --all-files --hook-stage manual" + pip install -U -q pre-commit + pre-commit run --all-files --hook-stage manual diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index db6ed1322d..5355d35978 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v4.6.0 hooks: - id: check-case-conflict - id: check-executables-have-shebangs @@ -15,7 +15,7 @@ repos: # We use the Python version instead of the original version which seems to require Docker # https://github.com/koalaman/shellcheck-precommit - repo: https://github.com/shellcheck-py/shellcheck-py - rev: v0.9.0.6 + rev: v0.10.0.1 hooks: - id: shellcheck name: shellcheck @@ -43,7 +43,7 @@ repos: [mdformat-gfm, mdformat-frontmatter, mdformat-footnote, mdformat-gfm-alerts] - repo: https://github.com/tcort/markdown-link-check - rev: v3.11.2 + rev: v3.12.2 hooks: - id: markdown-link-check args: ["-c", "markdown_link_config.json"] @@ -57,7 +57,7 @@ repos: stages: [manual] - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.27.3 + rev: 0.28.4 hooks: - id: check-github-workflows @@ -69,10 +69,10 @@ repos: - id: rst-inline-touching-normal - repo: 
https://github.com/codespell-project/codespell - rev: "v2.2.6" + rev: "v2.3.0" hooks: - id: codespell - args: ["-L", "fle,re-use,merchantibility,synching,crate,nin,infinit,te"] + args: ["-L", "fle,re-use,merchantibility,synching,crate,nin,infinit,te,checkin"] exclude: | (?x)^(.*\.rst )$ diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000000..ddd534c41f --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,19 @@ +# Read the Docs configuration file for MkDocs projects +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the version of Python and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.12" + +mkdocs: + configuration: mkdocs.yml + +# Optionally declare the Python requirements required to build your docs +python: + install: + - requirements: source/requirements.txt \ No newline at end of file diff --git a/README.md b/README.md index 92b8d07895..3f88b2fa57 100644 --- a/README.md +++ b/README.md @@ -1,77 +1,13 @@ # MongoDB Specifications +[![Documentation Status](https://readthedocs.org/projects/specifications/badge/?version=latest)](http://specifications.readthedocs.io/en/latest/?badge=latest) + This repository holds in progress and completed specification for features of MongoDB, Drivers, and associated products. Also contained is a rudimentary system for producing these documents. ## Driver Mantras -When developing specifications -- and the drivers themselves -- we follow the following principles: - -### Strive to be idiomatic, but favor consistency - -Drivers attempt to provide the easiest way to work with MongoDB in a given language ecosystem, while specifications -attempt to provide a consistent behavior and experience across all languages. Drivers should strive to be as idiomatic -as possible while meeting the specification and staying true to the original intent. - -### No Knobs - -Too many choices stress out users. 
Whenever possible, we aim to minimize the number of configuration options exposed to -users. In particular, if a typical user would have no idea how to choose a correct value, we pick a good default instead -of adding a knob. - -### Topology agnostic - -Users test and deploy against different topologies or might scale up from replica sets to sharded clusters. Applications -should never need to use the driver differently based on topology type. - -### Where possible, depend on server to return errors - -The features available to users depend on a server's version, topology, storage engine and configuration. So that -drivers don't need to code and test all possible variations, and to maximize forward compatibility, always let users -attempt operations and let the server error when it can't comply. Exceptions should be rare: for cases where the server -might not error and correctness is at stake. - -### Minimize administrative helpers - -Administrative helpers are methods for admin tasks, like user creation. These are rarely used and have maintenance costs -as the server changes the administrative API. Don't create administrative helpers; let users rely on "RunCommand" for -administrative commands. - -### Check wire version, not server version - -When determining server capabilities within the driver, rely only on the maxWireVersion in the hello response, not on -the X.Y.Z server version. An exception is testing server development releases, as the server bumps wire version early -and then continues to add features until the GA. - -### When in doubt, use "MUST" not "SHOULD" in specs - -Specs guide our work. While there are occasionally valid technical reasons for drivers to differ in their behavior, -avoid encouraging it with a wishy-washy "SHOULD" instead of a more assertive "MUST". - -### Defy augury - -While we have some idea of what the server will do in the future, don't design features with those expectations in mind. 
-Design and implement based on what is expected in the next release. - -Case Study: In designing OP_MSG, we held off on designing support for Document Sequences in Replies in drivers until the -server would support it. We subsequently decided not to implement that feature in the server. - -### The best way to see what the server does is to test it - -For any unusual case, relying on documentation or anecdote to anticipate the server's behavior in different -versions/topologies/etc. is error-prone. The best way to check the server's behavior is to use a driver or the shell and -test it directly. - -### Drivers follow semantic versioning - -Drivers should follow X.Y.Z versioning, where breaking API changes require a bump to X. See -[semver.org](https://semver.org/) for more. - -### Backward breaking behavior changes and semver - -Backward breaking behavior changes can be more dangerous and disruptive than backward breaking API changes. When -thinking about the implications of a behavior change, ask yourself what could happen if a user upgraded your library -without carefully reading the changelog and/or adequately testing the change. +See [Documentation](./source/driver-mantras.md). ## Writing Documents @@ -110,13 +46,11 @@ entire test with a note (e.g. *Removed*). ## Building Documents -We build the docs in `text` mode in CI to make sure they build without errors. We don't actually support building html, -since we rely on GitHub to render the documents. To build locally, run: +We use [mkdocs](https://www.mkdocs.org/) to render the documentation. To see a live view of the documentation, run: ```bash -pip install sphinx -cd source -sphinx-build -W -b text . 
docs_build index.rst +pip install mkdocs +mkdocs serve ``` ## Converting to JSON diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 0000000000..1f8af2db19 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,4 @@ +site_name: MongoDB Specifications +docs_dir: source +nav: + - 'index.md' \ No newline at end of file diff --git a/scripts/check_anchors/package-lock.json b/scripts/check_anchors/package-lock.json index b669988812..86329d794d 100644 --- a/scripts/check_anchors/package-lock.json +++ b/scripts/check_anchors/package-lock.json @@ -411,9 +411,9 @@ } }, "node_modules/ws": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.16.0.tgz", - "integrity": "sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ==", + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", "engines": { "node": ">=10.0.0" }, diff --git a/source/auth/auth.md b/source/auth/auth.md index 607945e181..b3150d1e15 100644 --- a/source/auth/auth.md +++ b/source/auth/auth.md @@ -1,4 +1,4 @@ -# Driver Authentication +# Authentication - Status: Accepted - Minimum Server Version: 2.6 @@ -148,7 +148,7 @@ Drivers MUST follow the following steps for an authentication handshake: If the authentication handshake fails for a socket, drivers MUST mark the server Unknown and clear the server's connection pool. (See [Q & A](#q--a) below and SDAM's -[Why mark a server Unknown after an auth error](/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#why-mark-a-server-unknown-after-an-auth-error) +[Why mark a server Unknown after an auth error](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#why-mark-a-server-unknown-after-an-auth-error) for rationale.) 
All blocking operations executed as part of the authentication handshake MUST apply timeouts per the @@ -1201,15 +1201,6 @@ in the MONGODB-OIDC specification, including sections or blocks that specificall #### [MongoCredential](#mongocredential) Properties -> [!NOTE] -> Drivers MUST NOT url-decode the entire `authMechanismProperties` given in an connection string when the -> `authMechanism` is `MONGODB-OIDC`. This is because the `TOKEN_RESOURCE` itself will typically be a URL and may contain -> a `,` character. The values of the individual `authMechanismProperties` MUST still be url-decoded when given as part -> of the connection string, and MUST NOT be url-decoded when not given as part of the connection string, such as through -> a `MongoClient` or `Credential` property. Drivers MUST parse the `TOKEN_RESOURCE` by splitting only on the first `:` -> character. Drivers MUST document that users must url-encode `TOKEN_RESOURCE` when it is provided in the connection -> string and it contains and of the special characters in \[`,`, `+`, `&`, `%`\]. - - username\ MAY be specified. Its meaning varies depending on the OIDC provider integration used. @@ -1233,7 +1224,9 @@ in the MONGODB-OIDC specification, including sections or blocks that specificall - TOKEN_RESOURCE\ The URI of the target resource. If `TOKEN_RESOURCE` is provided and `ENVIRONMENT` is not one of `["azure", "gcp"]` or `TOKEN_RESOURCE` is not provided and `ENVIRONMENT` is one of `["azure", "gcp"]`, the driver - MUST raise an error. + MUST raise an error. Note: because the `TOKEN_RESOURCE` is often itself a URL, drivers MUST document that a + `TOKEN_RESOURCE` with a comma `,` must be given as a `MongoClient` configuration and not as part of the connection + string, and that the `TOKEN_RESOURCE` value can contain a colon `:` character. - OIDC_CALLBACK\ An [OIDC Callback](#oidc-callback) that returns OIDC credentials. 
Drivers MAY allow the user to @@ -1260,7 +1253,7 @@ in the MONGODB-OIDC specification, including sections or blocks that specificall performed after SRV record resolution, if applicable. This property is only required for drivers that support the [Human Authentication Flow](#human-authentication-flow). -
+ #### Built-in OIDC Environment Integrations @@ -2049,6 +2042,8 @@ to EC2 instance metadata in ECS, for security reasons, Amazon states it's best p ## Changelog +- 2024-05-29: Disallow comma character when `TOKEN_RESOURCE` is given in a connection string. + - 2024-05-03: Clarify timeout behavior for OIDC machine callback. Add `serverless:forbid` to OIDC unified tests. Add an additional prose test for the behavior of `ALLOWED_HOSTS`. diff --git a/source/auth/tests/legacy/connection-string.json b/source/auth/tests/legacy/connection-string.json index f4c7f8c88e..67aafbff6e 100644 --- a/source/auth/tests/legacy/connection-string.json +++ b/source/auth/tests/legacy/connection-string.json @@ -163,47 +163,6 @@ "uri": "mongodb://localhost/?authMechanism=GSSAPI", "valid": false }, - { - "description": "should recognize the mechanism (MONGODB-CR)", - "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-CR", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "admin", - "mechanism": "MONGODB-CR", - "mechanism_properties": null - } - }, - { - "description": "should use the database when no authSource is specified (MONGODB-CR)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "foo", - "mechanism": "MONGODB-CR", - "mechanism_properties": null - } - }, - { - "description": "should use the authSource when specified (MONGODB-CR)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR&authSource=bar", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "bar", - "mechanism": "MONGODB-CR", - "mechanism_properties": null - } - }, - { - "description": "should throw an exception if no username is supplied (MONGODB-CR)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-CR", - "valid": false - }, { "description": "should recognize the mechanism 
(MONGODB-X509)", "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509", @@ -517,7 +476,7 @@ }, { "description": "should throw an exception if username is specified for test (MONGODB-OIDC)", - "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC&ENVIRONMENT:test", + "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:test", "valid": false, "credential": null }, @@ -601,7 +560,7 @@ }, { "description": "should handle a complicated url-encoded TOKEN_RESOURCE (MONGODB-OIDC)", - "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:abc%2Cd%25ef%3Ag%26hi", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:abcd%25ef%3Ag%26hi", "valid": true, "credential": { "username": "user", @@ -610,7 +569,7 @@ "mechanism": "MONGODB-OIDC", "mechanism_properties": { "ENVIRONMENT": "azure", - "TOKEN_RESOURCE": "abc,d%ef:g&hi" + "TOKEN_RESOURCE": "abcd%ef:g&hi" } } }, @@ -669,4 +628,4 @@ "credential": null } ] -} \ No newline at end of file +} diff --git a/source/auth/tests/legacy/connection-string.yml b/source/auth/tests/legacy/connection-string.yml index c88eb1edce..ded258f29d 100644 --- a/source/auth/tests/legacy/connection-string.yml +++ b/source/auth/tests/legacy/connection-string.yml @@ -116,36 +116,6 @@ tests: - description: should throw an exception if no username (GSSAPI) uri: mongodb://localhost/?authMechanism=GSSAPI valid: false -- description: should recognize the mechanism (MONGODB-CR) - uri: mongodb://user:password@localhost/?authMechanism=MONGODB-CR - valid: true - credential: - username: user - password: password - source: admin - mechanism: MONGODB-CR - mechanism_properties: -- description: should use the database when no authSource is specified (MONGODB-CR) - uri: 
mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR - valid: true - credential: - username: user - password: password - source: foo - mechanism: MONGODB-CR - mechanism_properties: -- description: should use the authSource when specified (MONGODB-CR) - uri: mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR&authSource=bar - valid: true - credential: - username: user - password: password - source: bar - mechanism: MONGODB-CR - mechanism_properties: -- description: should throw an exception if no username is supplied (MONGODB-CR) - uri: mongodb://localhost/?authMechanism=MONGODB-CR - valid: false - description: should recognize the mechanism (MONGODB-X509) uri: mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509 valid: true @@ -375,7 +345,7 @@ tests: valid: false credential: - description: should throw an exception if username is specified for test (MONGODB-OIDC) - uri: mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC&ENVIRONMENT:test + uri: mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:test valid: false credential: - description: should throw an exception if specified environment is not supported (MONGODB-OIDC) @@ -435,7 +405,7 @@ tests: ENVIRONMENT: azure TOKEN_RESOURCE: 'mongodb://test-cluster' - description: should handle a complicated url-encoded TOKEN_RESOURCE (MONGODB-OIDC) - uri: mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:abc%2Cd%25ef%3Ag%26hi + uri: mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:abcd%25ef%3Ag%26hi valid: true credential: username: user @@ -444,7 +414,7 @@ tests: mechanism: MONGODB-OIDC mechanism_properties: ENVIRONMENT: azure - TOKEN_RESOURCE: 'abc,d%ef:g&hi' + TOKEN_RESOURCE: 'abcd%ef:g&hi' - description: should url-encode a TOKEN_RESOURCE 
(MONGODB-OIDC) uri: mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:a$b valid: true diff --git a/source/auth/tests/mongodb-oidc.md b/source/auth/tests/mongodb-oidc.md index 72c93f4d02..d551ef8dda 100644 --- a/source/auth/tests/mongodb-oidc.md +++ b/source/auth/tests/mongodb-oidc.md @@ -178,7 +178,7 @@ source the `secrets-export.sh` file and use the associated env variables in your - Assert that the callback was called 2 times. - Close the client. -\*\*4.3 Write Commands Fail If Reauthentication Fails +#### 4.3 Write Commands Fail If Reauthentication Fails - Create a `MongoClient` whose OIDC callback returns one good token and then bad tokens after the first call. - Perform an `insert` operation that succeeds. diff --git a/source/benchmarking/benchmarking.md b/source/benchmarking/benchmarking.md index 25ba6b928a..d342dcec34 100644 --- a/source/benchmarking/benchmarking.md +++ b/source/benchmarking/benchmarking.md @@ -1,4 +1,4 @@ -# MongoDB Driver Performance Benchmarking +# Performance Benchmarking - Status: Accepted - Minimum Server Version: N/A diff --git a/source/bson-corpus/bson-corpus.md b/source/bson-corpus/bson-corpus.md index 09764f5b8c..7969b138e2 100644 --- a/source/bson-corpus/bson-corpus.md +++ b/source/bson-corpus/bson-corpus.md @@ -37,7 +37,7 @@ pseudo-specification provides such tests. ## Specification The specification for BSON lives at . The `extjson` format specification is -[here](../extended-json.rst). +[here](../extended-json.md). 
## Test Plan diff --git a/source/client-side-encryption/client-side-encryption.md b/source/client-side-encryption/client-side-encryption.md index b56d34a659..5314d079d2 100644 --- a/source/client-side-encryption/client-side-encryption.md +++ b/source/client-side-encryption/client-side-encryption.md @@ -2,7 +2,7 @@ - Status: Accepted - Minimum Server Version: 4.2 (CSFLE), 6.0 (Queryable Encryption) -- Version: 1.13.0 +- Version: 1.14.0 ______________________________________________________________________ @@ -296,7 +296,7 @@ as described in [Handling of Native UUID Types](../uuid.rst). ### MongoClient Changes - + ```typescript class MongoClient { @@ -315,7 +315,7 @@ class MongoClient { } ``` - + ```typescript class AutoEncryptionOpts { @@ -423,12 +423,8 @@ Drivers MUST document that an additional `MongoClient` may be created, using the See [What's the deal with metadataClient, keyVaultClient, and the internal client?](#whats-the-deal-with-metadataclient-keyvaultclient-and-the-internal-client) - - - - - - + + #### kmsProviders @@ -570,7 +566,7 @@ Once requested, drivers MUST create a new [KMSProviders](#kmsproviders) $P$ acco $t_0 + d\_{exp}$. 6. Return $P$ as the additional KMS providers to [libmongocrypt](#libmongocrypt). - + ##### Obtaining GCP Credentials @@ -744,7 +740,7 @@ Drivers MUST implement extraOptions in a way that allows deprecating/removing op break, such as with a BSON document or map type instead of a struct type with fixed fields. See [Why are extraOptions and kmsProviders maps?](#why-are-extraoptions-and-kmsproviders-maps). 
- + ##### `extraOptions.cryptSharedLibPath` @@ -757,7 +753,7 @@ Allow the user to specify an absolute path to a [crypt_shared](#crypt_shared) dy - [Path Resolution Behavior](#path-resolution-behavior) - [Enabling crypt_shared](#enabling-crypt_shared) - + ##### `extraOptions.cryptSharedLibRequired` @@ -770,8 +766,7 @@ If, after initializing a `libmongocrypt_handle`, [crypt_shared](#crypt_shared) i [extraOptions.cryptSharedLibRequired](#extraoptions.cryptsharedlibrequired) is `true`, the driver MUST consider the `libmongocrypt_handle` to be invalid and return an error to the user. Refer: - - + #### encryptedFieldsMap @@ -802,7 +797,7 @@ See [Why is bypassQueryAnalysis needed?](#why-is-bypassqueryanalysis-needed). A collection supporting Queryable Encryption requires an index and three additional collections. - + #### Collection `encryptedFields` Lookup (GetEncryptedFields) @@ -982,8 +977,8 @@ class ClientEncryption { // 2. An Aggregate Expression of this form: // {$and: [{$gt: [, ]}, {$lt: [, ]}] // $gt may also be $gte. $lt may also be $lte. - // Only supported when queryType is "rangePreview" and algorithm is "RangePreview". - // NOTE: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. + // Only supported when queryType is "range" and algorithm is "Range". + // NOTE: The "range" queryType and "Range" algorithm are currently unstable API and subject to backwards breaking changes. encryptExpression(expr: Document, opts: EncryptOpts): Document; // Decrypts an encrypted value (BSON binary of subtype 6). @@ -996,9 +991,9 @@ class ClientEncryption { } ``` - + - + ```typescript interface ClientEncryptionOpts { @@ -1168,17 +1163,20 @@ class EncryptOpts { rangeOpts: Optional } -// NOTE: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. -// RangeOpts specifies index options for a Queryable Encryption field supporting "rangePreview" queries. 
-// min, max, sparsity, and precision must match the values set in the encryptedFields of the destination collection. +// NOTE: RangeOpts is currently unstable API and subject to backwards breaking changes. +// RangeOpts specifies index options for a Queryable Encryption field supporting "range" queries. +// min, max, trimFactor, sparsity, and precision must match the values set in the encryptedFields of the destination collection. // For double and decimal128, min/max/precision must all be set, or all be unset. class RangeOpts { - // min is required if precision is set. + // min is the minimum value for the encrypted index. Required if precision is set. min: Optional, - // max is required if precision is set. + // max is the maximum value for the encrypted index. Required if precision is set. max: Optional, - sparsity: Int64, - // precision may only be set for double or decimal128. + // trimFactor may be used to tune performance. When omitted, a default value is used. + trimFactor: Optional, + // sparsity may be used to tune performance. When omitted, a default value is used. + sparsity: Optional, + // precision determines the number of significant digits after the decimal point. May only be set for double or decimal128. precision: Optional } ``` @@ -1202,46 +1200,43 @@ One of the strings: - "AEAD_AES_256_CBC_HMAC_SHA_512-Random" - "Indexed" - "Unindexed" -- "RangePreview" +- "Range" (unstable) -The result of explicit encryption with the "Indexed" or "RangePreview" algorithm must be processed by the server to -insert or query. Drivers MUST document the following behavior: +The result of explicit encryption with the "Indexed" or "Range" algorithm must be processed by the server to insert or +query. 
Drivers MUST document the following behavior: -> To insert or query with an "Indexed" or "RangePreview" encrypted payload, use a `MongoClient` configured with +> To insert or query with an "Indexed" or "Range" encrypted payload, use a `MongoClient` configured with > `AutoEncryptionOpts`. `AutoEncryptionOpts.bypassQueryAnalysis` may be true. `AutoEncryptionOpts.bypassAutoEncryption` > must be false. > [!NOTE] -> The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. +> The "Range" algorithm is currently unstable API and subject to backwards breaking changes. #### contentionFactor -contentionFactor only applies when algorithm is "Indexed" or "RangePreview". It is an error to set contentionFactor when -algorithm is not "Indexed" or "RangePreview". - -> [!NOTE] -> The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. +contentionFactor may be used to tune performance. Only applies when algorithm is "Indexed" or "Range". libmongocrypt +returns an error if contentionFactor is set for a non-applicable algorithm. #### queryType One of the strings: - "equality" -- "rangePreview" +- "range" -queryType only applies when algorithm is "Indexed" or "RangePreview". It is an error to set queryType when algorithm is -not "Indexed" or "RangePreview". +queryType only applies when algorithm is "Indexed" or "Range". libmongocrypt returns an error if queryType is set for a +non-applicable queryType. > [!NOTE] -> The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. +> The "range" queryType is currently unstable API and subject to backwards breaking changes. #### rangeOpts -rangeOpts only applies when algorithm is "rangePreview". It is an error to set rangeOpts when algorithm is not -"rangePreview". +rangeOpts only applies when algorithm is "range". 
libmongocrypt returns an error if rangeOpts is set for a +non-applicable algorithm. > [!NOTE] -> The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. +> rangeOpts is currently unstable API and subject to backwards breaking changes. ## User facing API: When Auto Encryption Fails @@ -1364,7 +1359,7 @@ Drivers MUST propagate errors from libmongocrypt in whatever way is idiomatic to etc.). These errors MUST be distinguished in some way (e.g. exception type) to make it easier for users to distinguish when a command fails due to client side encryption. - + ## Enabling Command Marking with the `crypt_shared` Library @@ -1384,8 +1379,7 @@ facilitate driver testing with [crypt_shared](#crypt_shared) (Refer: > The driver MUST NOT manipulate or do any validation on the [crypt_shared](#crypt_shared) path options provided in > [extraOptions](#extraoptions). They should be passed through to [libmongocrypt](#libmongocrypt) unchanged. - - + ### Setting Search Paths @@ -1404,7 +1398,7 @@ execution from the ambient state of the host system. Refer to: [Path Resolution Behavior](#path-resolution-behavior) and [Search Paths for Testing](#search-paths-for-testing) - + ### Overriding the `crypt_shared` Library Path @@ -1478,7 +1472,7 @@ successfully loaded by asking [libmongocrypt](#libmongocrypt) for the [crypt_sha the result is an empty string, [libmongocrypt](#libmongocrypt) did not load [crypt_shared](#crypt_shared) and the driver must rely on [mongocryptd](#mongocryptd) to mark command documents for encryption. 
- + ### "Disabling" `crypt_shared` @@ -1574,7 +1568,7 @@ If the [crypt_shared](#crypt_shared) library is loaded, the driver MUST NOT atte Single-threaded drivers MUST connect with [serverSelectionTryOnce=false](../server-selection/server-selection.md#serverselectiontryonce), `connectTimeoutMS=10000`, and MUST bypass -[cooldownMS](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#cooldownms) when connecting to +[cooldownMS](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#cooldownms) when connecting to mongocryptd. See [Why are serverSelectionTryOnce and cooldownMS disabled for single-threaded drivers connecting to mongocryptd?](#why-are-serverselectiontryonce-and-cooldownms-disabled-for-single-threaded-drivers-connecting-to-mongocryptd) @@ -2115,7 +2109,7 @@ server before making another attempt. Meaning if the first attempt to mongocrypt observe a 5 second delay. This is not configurable in the URI, so this must be overridden internally. Since mongocryptd is a local process, there should only be a very short delay after spawning mongocryptd for it to start listening on sockets. See the SDAM spec description of -[cooldownMS](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#cooldownms). +[cooldownMS](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#cooldownms). Because single threaded drivers may exceed `serverSelectionTimeoutMS` by the duration of the topology scan, `connectTimeoutMS` is also reduced. @@ -2385,10 +2379,16 @@ libmongocrypt would create multiple OP_MSGs to send. Key management functions currently assume there are no concurrent accesses of the key vault collection being operated on. To support concurrent access of the key vault collection, the key management functions may be overloaded to take an -explicit session parameter as described in the [Drivers Sessions Specification](../sessions/driver-sessions.rst). 
+explicit session parameter as described in the [Drivers Sessions Specification](../sessions/driver-sessions.md). ## Changelog +- 2024-07-22: Make `trimFactor` and `sparsity` optional. + +- 2024-06-13: Document range as unstable. + +- 2024-05-31: Replace rangePreview with range. + - 2024-03-20: Add `delegated` option to "kmip" KMS provider - 2024-02-27: Migrated from reStructuredText to Markdown. diff --git a/source/client-side-encryption/etc/data/range-encryptedFields-Date.json b/source/client-side-encryption/etc/data/range-encryptedFields-Date.json index 97a2b2d4e5..defa6e37ff 100644 --- a/source/client-side-encryption/etc/data/range-encryptedFields-Date.json +++ b/source/client-side-encryption/etc/data/range-encryptedFields-Date.json @@ -10,10 +10,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -30,4 +33,4 @@ } } ] -} +} \ No newline at end of file diff --git a/source/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json b/source/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json index 4d284475f4..dbe28e9c10 100644 --- a/source/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json +++ b/source/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json @@ -10,14 +10,17 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } } } ] -} +} \ No newline at end of file diff --git a/source/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json b/source/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json index 53449182b2..538ab20f0e 100644 --- 
a/source/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json +++ b/source/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json @@ -10,10 +10,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -29,4 +32,4 @@ } } ] -} +} \ No newline at end of file diff --git a/source/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json b/source/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json index b478a772d7..fb4f46d375 100644 --- a/source/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json +++ b/source/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json @@ -10,14 +10,17 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } } } ] -} +} \ No newline at end of file diff --git a/source/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json b/source/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json index 395a369680..07d1c84d6f 100644 --- a/source/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json +++ b/source/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json @@ -10,10 +10,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -29,4 +32,4 @@ } } ] -} +} \ No newline at end of file diff --git a/source/client-side-encryption/etc/data/range-encryptedFields-Int.json 
b/source/client-side-encryption/etc/data/range-encryptedFields-Int.json index 61b7082dff..4f0b4854e4 100644 --- a/source/client-side-encryption/etc/data/range-encryptedFields-Int.json +++ b/source/client-side-encryption/etc/data/range-encryptedFields-Int.json @@ -10,10 +10,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -26,4 +29,4 @@ } } ] -} +} \ No newline at end of file diff --git a/source/client-side-encryption/etc/data/range-encryptedFields-Long.json b/source/client-side-encryption/etc/data/range-encryptedFields-Long.json index b18b84b6e8..32fe1ea15d 100644 --- a/source/client-side-encryption/etc/data/range-encryptedFields-Long.json +++ b/source/client-side-encryption/etc/data/range-encryptedFields-Long.json @@ -10,10 +10,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -26,4 +29,4 @@ } } ] -} +} \ No newline at end of file diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-Aggregate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-Aggregate.yml.template similarity index 94% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-Aggregate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-Aggregate.yml.template index 9dcc5b2324..cb13aa6542 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-Aggregate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-Aggregate.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. 
runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -124,12 +122,6 @@ tests: "_id": 0, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -185,12 +177,6 @@ tests: "_id": 1, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-Correctness.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-Correctness.yml.template similarity index 98% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-Correctness.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-Correctness.yml.template index 730245932c..2ede184dba 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-Correctness.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-Correctness.yml.template @@ -3,12 +3,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. 
# FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-Delete.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-Delete.yml.template similarity index 95% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-Delete.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-Delete.yml.template index 5c407e1a42..bc58547539 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-Delete.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-Delete.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -126,12 +124,6 @@ tests: "_id": 0, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-FindOneAndUpdate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-FindOneAndUpdate.yml.template similarity index 94% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-FindOneAndUpdate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-FindOneAndUpdate.yml.template index 089b42a8a6..92c48ecfd2 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-FindOneAndUpdate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-FindOneAndUpdate.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -122,12 +120,6 @@ tests: "_id": 0, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -183,12 +175,6 @@ tests: "_id": 1, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-InsertFind.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-InsertFind.yml.template similarity index 93% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-InsertFind.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-InsertFind.yml.template index 93e8c1a739..419e50ab10 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-InsertFind.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-InsertFind.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -118,12 +116,6 @@ tests: "_id": 0, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -179,12 +171,6 @@ tests: "_id": 1, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-Update.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-Update.yml.template similarity index 94% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-Update.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-Update.yml.template index 202a3b34a2..4b9410af57 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Date-Update.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Date-Update.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -135,12 +133,6 @@ tests: "_id": 0, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -196,12 +188,6 @@ tests: "_id": 1, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-Aggregate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-Aggregate.yml.template similarity index 99% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-Aggregate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-Aggregate.yml.template index 014e46d89a..02d2699c65 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-Aggregate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-Aggregate.yml.template @@ -1,13 +1,11 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -127,12 +125,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -910,12 +902,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-Correctness.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-Correctness.yml.template similarity index 98% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-Correctness.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-Correctness.yml.template index 8ca44929d7..fec83b77bc 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-Correctness.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-Correctness.yml.template @@ -3,13 +3,11 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-Delete.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-Delete.yml.template similarity index 99% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-Delete.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-Delete.yml.template index b208088cd7..46c1554c6d 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-Delete.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-Delete.yml.template @@ -1,13 +1,11 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -129,12 +127,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-FindOneAndUpdate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-FindOneAndUpdate.yml.template similarity index 99% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-FindOneAndUpdate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-FindOneAndUpdate.yml.template index 00abc4b259..ca2e1da60f 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-FindOneAndUpdate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-FindOneAndUpdate.yml.template @@ -1,13 +1,11 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -125,12 +123,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -908,12 +900,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-InsertFind.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-InsertFind.yml.template similarity index 99% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-InsertFind.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-InsertFind.yml.template index 4d62deae8d..13c3e82a49 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-InsertFind.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-InsertFind.yml.template @@ -1,13 +1,11 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -121,12 +119,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -904,12 +896,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-Update.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-Update.yml.template similarity index 99% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-Update.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-Update.yml.template index bc36f2b40f..a7795a2e32 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Decimal-Update.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Decimal-Update.yml.template @@ -1,13 +1,11 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -138,12 +136,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -921,12 +913,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-Aggregate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-Aggregate.yml.template similarity index 95% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-Aggregate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-Aggregate.yml.template index 7962b27c8a..f6a6bc0bff 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-Aggregate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-Aggregate.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -126,12 +124,6 @@ tests: }, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -231,12 +223,6 @@ tests: }, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-Correctness.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-Correctness.yml.template similarity index 98% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-Correctness.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-Correctness.yml.template index 415f245ce1..51e6fa40e5 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-Correctness.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-Correctness.yml.template @@ -3,12 +3,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-Delete.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-Delete.yml.template similarity index 96% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-Delete.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-Delete.yml.template index 09e310d0d2..a170ebd1b3 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-Delete.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-Delete.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -128,12 +126,6 @@ tests: }, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.yml.template similarity index 95% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.yml.template index 9b4ad94ffe..10959b2d45 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -124,12 +122,6 @@ tests: }, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -229,12 +221,6 @@ tests: }, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-InsertFind.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-InsertFind.yml.template similarity index 95% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-InsertFind.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-InsertFind.yml.template index d16f6437b4..2b58b783a0 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-InsertFind.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-InsertFind.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -118,12 +116,6 @@ tests: "_id": 0, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -221,12 +213,6 @@ tests: "_id": 1, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-Update.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-Update.yml.template similarity index 96% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-Update.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-Update.yml.template index a560dc773d..e4fd388435 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DecimalPrecision-Update.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DecimalPrecision-Update.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -135,12 +133,6 @@ tests: "_id": 0, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -238,12 +230,6 @@ tests: "_id": 1, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Defaults.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Defaults.yml.template new file mode 100644 index 0000000000..f92a48d314 --- /dev/null +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Defaults.yml.template @@ -0,0 +1,157 @@ +# Test "range" field with defaults for `trimFactor` and `sparsity`. +# Test requires libmongocrypt with changes in 14ccd9ce (MONGOCRYPT-698). +runOn: + - minServerVersion: "8.0.0" # Requires 8.0.0-rc13. + topology: [ "replicaset", "sharded", "load-balanced" ] # Exclude "standalone". QE collections are not supported on standalone. 
+database_name: &database_name "default" +collection_name: &collection_name "default" +data: [] +encrypted_fields: &encrypted_fields { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + # Exclude `trimFactor` and `sparsity` + "contention": { "$numberLong": "0" }, + "min": { "$numberInt": "0" }, + "max": { "$numberInt": "200" } + } + } + ] +} +key_vault_data: [ {{ yamlfile("keys/key1-document.json") }} ] +tests: + - description: "FLE2 Range applies defaults for trimFactor and sparsity" + clientOptions: + autoEncryptOpts: + kmsProviders: + local: {{ local_provider() }} + operations: + - name: insertOne + arguments: + document: &doc0 { _id: 0, encryptedInt: { $numberInt: "0" } } + - name: insertOne + arguments: + document: &doc1 { _id: 1, encryptedInt: { $numberInt: "1" } } + - name: find + arguments: + filter: { encryptedInt: { $gt: { $numberInt: "0" } } } + result: [*doc1] + expectations: + - command_started_event: + command: + listCollections: 1 + filter: + name: *collection_name + command_name: listCollections + - command_started_event: + command: + find: datakeys + filter: { + "$or": [ + { + "_id": { + "$in": [ + {{ yamlfile("keys/key1-id.json") }} + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + } + $db: keyvault + readConcern: { level: "majority" } + command_name: find + - command_started_event: + command: + insert: *collection_name + documents: + - &doc0_encrypted { "_id": 0, "encryptedInt": { $$type: "binData" } } + ordered: true + encryptionInformation: &encryptionInformation + type: 1 + schema: + default.default: + # libmongocrypt applies escCollection and ecocCollection to outgoing command. 
+ escCollection: "enxcol_.default.esc" + ecocCollection: "enxcol_.default.ecoc" + <<: *encrypted_fields + command_name: insert + - command_started_event: + command: + insert: *collection_name + documents: + - &doc1_encrypted { "_id": 1, "encryptedInt": { $$type: "binData" } } + ordered: true + encryptionInformation: *encryptionInformation + command_name: insert + - command_started_event: + command: + find: *collection_name + filter: + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DfQaAAADcGF5bG9hZADEGgAABGcAsBoAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAA30oqY6NKy1KWDWf6Z36DtA2QsL9JRALvHX6smxz8cb4FcwAgAAAAADIhM0hCHwFGH+k7kPGuZlO+v5TjV6RRwA5FqUKM60o0BWwAIAAAAABTMPNUweBKrILSCxc5gcgjn9pTkkKX7KqWXgNMk4q7XgADMgB9AAAABWQAIAAAAACnCDvYEbgR9fWeQ8SatKNX43p0XIXTyFfzc7/395V2swVzACAAAAAAp8pkn2wJrZRBLlD18oE1ZRRiujmtFtuHYTZDzdGNE4kFbAAgAAAAAE2eptD2Jp126h5cd7S6k8IjRB6QJhuuWzPU/SEynDXTAAMzAH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzQAfQAAAAVkACAAAAAA8Ci9z02yMVsDNyHvLStLAHR25LO22UO5P/gbUG/IStQFcwAgAAAAAOdfFhaFVq1JPr3dIeLm1EYKWgceZ7hZ5FJT5u/lL/I+BWwAIAAAAADqUyU1hSFDLCmqsz2dhPhefzCShUV/Z2x+4P9xcGw8rwADNQB9AAAABWQAIAAAAAD3g2atCWYVOXW0YbCbvIturqNIAsy210bkL9KmqVMlAAVzACAAAAAAVGEb7L0QCjV/PBTAvUyhlddo467ToKjlMdwI9hsjuE4FbAAgAAAAAJe0bDhUH1sZldnDGWn0xMa1CQuN6cgv/i/6XqnpPS39AAM2AH0AAAAFZAAgAAAAANQOKUE9FOmCoMva2IYg45LZXJX0cMpUR1OvIwFmjLDYBXMAIAAAAAB6dyIKkQ86l/8j8zeWcDYeVGRYKd0USz6To3LbOBAKsAVsACAAAAAAELK0ExI0g4/WxNs+mf+Ua+mie3MuMO3daPGukA23VUYAAzcAfQAAAAVkACAAAAAARQp+fGA08v1bhcnYbfsP0ubXl9yg18QmYMfh2sd8EdEFcwAgAAAAABhe79wEznE298tt02xyRF7bk7a2NH9kwVg1TPY5/lT1BWwAIAAAAAADiGV5f/RRPkwpSrZMGHNBSarmwyqV+SYXI73QW/PmnwADOAB9AAAABWQAIAAAAABnW3CpmSFTglPNKYHJHhJHC/vd5BMWQpztIXQBL0sCngVzACAAAAAAC21qRBu2Px7VUz1lW95Dfn/0tw2yq9AV
Btka34HijLgFbAAgAAAAAP8S1s5OA5cJT6ILpA94LanuLsSl9BsRCWHBtufFTMVrAAM5AH0AAAAFZAAgAAAAAJRIWu6DI2LR+2Pi09OaBZEmS2FInyBnGs9wf9Jf2wiIBXMAIAAAAABoDqKzj11qyOfXl4dcfkmGHqZxXyAsnGlgA9wsJRWWUQVsACAAAAAAIsDousyo/D8e4BCwUqvFhrKtOnpcGCSqpN94oFtWaC0AAzEwAH0AAAAFZAAgAAAAAE0h7vfdciFBeqIk1N14ZXw/jzFT0bLfXcNyiPRsg4W4BXMAIAAAAAB0Kbvm3VLBphtd8/OpgNuJtJaJJLhHBCKZJJeK+GcthAVsACAAAAAAKfjHp8xww1JDjzyjTnfamOvjFDc1Z3Hp/v/ZuQnFOOEAAzExAH0AAAAFZAAgAAAAACL9+rQRyywIXa5Pr7g2SnB0s0EjIct7PQtzjEkA69acBXMAIAAAAADz54imCCbu/qQkYP9wW2f5pHoBS+EyCe+xuDwC0UTiYgVsACAAAAAAKv602j4c3Bpn2t10qGl68eAD/fQsIH5lKMj8ANwrf7oAAzEyAH0AAAAFZAAgAAAAAKTK0NLhQ/+Y/HMxjRwBlXpXJAhAmCoWf1fReTegPnVpBXMAIAAAAAD7AlW+P4FfQS4r8d7EEvPVEP1diSbrVDBqg8ZvNl1XRAVsACAAAAAATTSEkff+/JMBjNwUciY2RQ6M66uMQMAtwU+UidDv1y4AAzEzAH0AAAAFZAAgAAAAAGMbgPxi2Wu1AlqoDKTgyBnCZlnCjHm2naxRcizkIbYJBXMAIAAAAADMvSM3VZzVyRFCfUvcLXAXQFRIxlhm0t0dUsnaRZG4hgVsACAAAAAAI7uGriMAQc4A/a70Yi1Y7IAC7o/mfNYf7/FvwELYf80AAzE0AH0AAAAFZAAgAAAAAPnZ1bdmrcX0fsSxliuSqvDbRqwIiVg0tYp0PViRX0nOBXMAIAAAAAAqBdZGg9O74mnwyQF+lILtyzHdLOErDjPSf9sM8EqCugVsACAAAAAAwhuDsz+fCtqY8mW8QvEVQERjDChwrYTw4y7dinlCCOMAAzE1AH0AAAAFZAAgAAAAAJ40Dmb5BUT1AlWjfXB43nIbJgDn9rBg9FAeYR80WK0vBXMAIAAAAAAMPqLMDdNmnKzA3Hq49/NkJfs+/cjnyjSAbmiOFUE5FgVsACAAAAAAxbi7ql49Y4pduqWlLJqpwimRzrEnC7w5fWaMBiinHL8AAzE2AH0AAAAFZAAgAAAAAGelnhqWM2gUVy4P5QE/2Zfd7s9BugPqB/tcnSsFg5X0BXMAIAAAAAAWUhif3G+NMvZ3YPLB5OMuIhfPEu6U8KR9gTvJFz5uIwVsACAAAAAADEs8/aVSj2sJjxjv1K7o/aH8vZzt1bga73YiIKUx5DYAAzE3AH0AAAAFZAAgAAAAAD1xX2wCyf1aK1MoXnBAPfWLeBxsJI2i06tWbuiYKgElBXMAIAAAAACW1NW4RibvY0JRUzPvCmKnVbEy8AIS70fmsY08WgJOEgVsACAAAAAAQq9eIVoLcd4WxXUC3vub+EnxmcI2uP/yUWr3cz0jv9EAAzE4AH0AAAAFZAAgAAAAAHwU1LYeJmTch640sTu3VRRRdQg4YZ7S9IRfVXWHEWU8BXMAIAAAAACozWKD2YlqbQiBVVwJKptfAVM+R2FPJPtXkxVFAhHNXQVsACAAAAAAn7LS0QzTv9sOJzxH0ZqxsLYBYoArEo/PIXkU/zTnpM0AAzE5AH0AAAAFZAAgAAAAAHKaToAsILpmJyCE02I1iwmF/FibqaOb4b5nteuwOayfBXMAIAAAAABPxYjSK5DKgsdUZrZ+hM6ikejPCUK6Rqa0leoN7KOM0QVsACAAAAAAH9rPq5vvOIe9nTAcM1W1dVhQZ+gSkBohgoWLPcZnQXcAAzIwAH0AAAAFZAAgAAAAANTGiHqJVq28n7mMZsJD6gHx
VQp1A6z8wgZVW+xV/lhmBXMAIAAAAABCR4BfdNVy7WE+IyQ312vYuIW0aGcXxr2II/MbNz8ZdAVsACAAAAAAng0GYpYJTypRLQUd5tIXWaAjZX5na04T/BypmwwrXPoAAzIxAH0AAAAFZAAgAAAAABooumzjEqp9Hvvd+sn1L82NI2iUGRl0nXQNJTHM7oyVBXMAIAAAAADgjz5L2ursK4C+pXXsJ6XHABhyallj9s/vSUgxXvjiiwVsACAAAAAAPjlAM0tbO6EUmLAeIZt57YMkMsuQfuC3T3d9vtnxgjwAAzIyAH0AAAAFZAAgAAAAAMA4jmE8U2uGkYUeKoYSlb22tfrRq2VlhV1Jq1kn4hV9BXMAIAAAAADG4fLeJUcINPSb1pMfAASJkuYsgS/59Eq/51mET/Y7RQVsACAAAAAAmwwcWOnzvpxm4pROXOL+BlxjEG/7v7hIautb2ubFT44AAzIzAH0AAAAFZAAgAAAAAK8/E3VHzHM6Kjp39GjFy+ci1IiUG5oxh0W6elV+oiX2BXMAIAAAAAA4/F4Q94xxb2TvZcMcji/DVTFrZlH8BL/HzD86RRmqNAVsACAAAAAAif3HPf6B1dTX/W+Vlp6ohadEQk/GAmHYzXfJia2zHeIAAzI0AH0AAAAFZAAgAAAAAGUX9ttLN1cCrOjlzsl/E6jEzQottNDw8Zo94nbO1133BXMAIAAAAAA7uVthFvXH+pbBrgQmnkPcpiHFEVCAi0WA7sAt9tlt3gVsACAAAAAAznaMStSbtGXU1Pb5z9KDTvEd79s6gmWYCKOKdzeijpEAAzI1AH0AAAAFZAAgAAAAAKnT/qg8N85Q9EQvpH7FBqUooxHFgrIjqLlIDheva2QSBXMAIAAAAABGAKkFMKoSIrvClWF7filoYM6fI9xSqOJVNS3dv4lxYwVsACAAAAAAgITE31hQA4ZOxpUFYSYv0mzWbd/6RKgbUXiUY96fBQEAAzI2AH0AAAAFZAAgAAAAAHRDRDT2hJrJ8X9zB9ELT28q8ZsfkYr92chaZYakiLlqBXMAIAAAAAAT0Le67ObldDta/Qb17dYfdslPsJTfGj3bWAgC0JIingVsACAAAAAAMGDrqys8iJ3fCT2Cj+zXIuXtsf4OAXWJl5HoPUMlbNoAAzI3AH0AAAAFZAAgAAAAAOOJcUjYOE0KqcYS1yZ363zglQXfr3XSD+R5fWLSivDoBXMAIAAAAABjeLe+tg37lNa+DdVxtlCtY77tV9PqfJ5X4XEKrfwu0AVsACAAAAAAlbpHiQAPLLTvSF+u58RBCLnYQKB5wciIQmANV9bkzsoAAzI4AH0AAAAFZAAgAAAAAMwWOOaWDDYUusdA1nyoaEB3C4/9GRpFNGags95Ddp4LBXMAIAAAAACLrsQXGWK15fW4mPEUXJ/90by13aG+727qWJep8QJ/WgVsACAAAAAAuThwsAsKUB56QAXC0MjJsZ9736atbiHPlK2tE0urf9QAAzI5AH0AAAAFZAAgAAAAABPRXBK0z8UANcvMDWntBjN9yF7iGMPLbhbaKrvHwcplBXMAIAAAAACZlqWsYPIb+ydmH03BxD3TqSGsSNoI7EVCy0VgW0TpYgVsACAAAAAAD2uaBv8oc7l4EeC5PWx5sfeyGZoas0JdFJ33M3jjgjMAAzMwAH0AAAAFZAAgAAAAAOn9/6pbzjIxFEApugaVOvVKXq23sDCJELv5UtLPDZI3BXMAIAAAAACHIwSDTlof0vFoigF4drbeM/8rdlj/4U386zQsNLtPGwVsACAAAAAAsYt/rXnpL55J9rlWSFRA4seaU6ggix7RgxbrJPu6gO4AAzMxAH0AAAAFZAAgAAAAAIMCESykv5b5d6mYjU5DlnO709lOFCaNoJBLtzBIqmg4BXMAIAAAAADs1Bfuaun4Es3nQ4kr29BzheLRDcFv+9a0gOGkSEcrDgVsACAAAAAA5kW6i/jOBSdoGAsZEZxVNRvt
6miv86bP8JfUT+1KJg8AAzMyAH0AAAAFZAAgAAAAAFSPmr27XgKhUkbEvvC6Br5K1w7280NZrrhdzfYF+YGjBXMAIAAAAADv2h+Xq6kM7MHYTLMACRwbe2MzGHu4sdB67FGzDR6H4QVsACAAAAAAKII0MMC7o6GKVfGo2qBW/p35NupBp7MI6Gp0zXYwJOcAAzMzAH0AAAAFZAAgAAAAAPSV9qprvlNZK6OSQZNxKhJmBMs6QCKFESB/oeIvAS0iBXMAIAAAAAA835Jh22/pvZgKoYH6KjE+RRpYkaM1G35TWq6uplk/rgVsACAAAAAA162IdSb079yVlS7GkuSdHU3dOw03a+NS55ZPVBxbD08AAzM0AH0AAAAFZAAgAAAAAGsadEBJFax/UltPXB86G/YPxo6h353ZT+rC62iGy7qqBXMAIAAAAADs9TP3h91f6bTuG8QCQMA3atAVGs8k0ZjVzX3pM8HNAgVsACAAAAAA2ed4R4wYD6DT0P+N6o3gDJPE0DjljbRAv5vme3jb42sAAzM1AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzM2AH0AAAAFZAAgAAAAAKJY+8+7psFzJb5T+Mg9UWb6gA9Y8NN9j/ML2jZkNDNPBXMAIAAAAAA2R/nCtSYfCim89BzdUPS+DTQGwYDk+2ihFPEBS8h+ygVsACAAAAAAaEQra7xyvA3JS0BasIpRVrz7ZXsp6RpH7OpfJBFzFG8AAzM3AH0AAAAFZAAgAAAAAI4qr+sJiRaqwZRhnenAzD7tTKq+jP1aaLyAln3w1HQuBXMAIAAAAADNYpqV73NpwN+Ta0ms1SRiu+6WNOOdGT+syghL+JAFhQVsACAAAAAAN07Fo9SK+fXp5Odk1J806pyVWc2WHXCtb1gJQknTgqsAAzM4AH0AAAAFZAAgAAAAAISgN1Hid7IWvDESN/3tywFZiBsZPYapOUx9/QjDDxLfBXMAIAAAAAA7lxpEz3+CGdv6/WKIAlIwRYURREKgn7+StwNoVekkDwVsACAAAAAAx+Oa2v1e1R7VomfsvcKO8VkY4eTl7LzjNQQL6Cj6GBQAAzM5AH0AAAAFZAAgAAAAAOTLdk1RIUzCsvK7xCXy+LxGhJf87fEL406U9QKta3JRBXMAIAAAAAD8+6UnUn8sN6AgQuuf7uFxW+2ZJNpZLgp3eKVtjbo9ewVsACAAAAAAQN3mZHmaDM0ZbUnk2O/+wCUjiCs4bnshfHjd/4ygLXcAAzQwAH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzQxAH0AAAAFZAAgAAAAAPLX4XT1eMfokMvj73G6loHEotbdivVFM6cpMbU0zIOmBXMAIAAAAABuTqwm6E60kVBN5iClzLnMBozIQRYjMozzRNKVhixkEAVsACAAAAAAjvY9G0Of8EQcZ4GVfSEVz7jrNn7i4qps2r82jJmngKoAAzQyAH0AAAAFZAAgAAAAAGzGJAUZBcVKRb4bCSNaRxtcDH2TqIgHqMElD9RL7SzDBXMAIAAAAABbJfrLwBrqZ2Ylm9QfL7nkW+GJ8vTlaeMUDT5620ebaAVsACAAAAAASiaS1IlBls5Tan57XqqbR1cuvyOcoSibJJQGREzm4c0AAzQzAH0AAAAFZAAgAAAAAC028abAppwE/ApZHU5RbzZZ8OPD5eJ8/6+NgiSFf4d+BXMAIAAAAAD3THvDUYWULR+A
VLuRRPPAMVMeZ2ldWpBYSODboszWbQVsACAAAAAAATOaeYj+kx3MTDeNUcKGbUxLZDeMjC8JrWnlHmWTamQAAzQ0AH0AAAAFZAAgAAAAAHWr8wQYIKLiKeb3wd8kZQuXD/GUHDqXj12K/EQWV11CBXMAIAAAAADo3aFHDuyfls9tcWCxlFqJn4zDXd3WT9CIFYFjJnTYswVsACAAAAAAeMbIatR7DgefzuvF4WyNVDjJxP8KPA6U/rmMQIBvpM0AAzQ1AH0AAAAFZAAgAAAAAMdRi6AAjF1Z9ucMqYl2Ud1PLUGOlOPJFgSrPTjs27u8BXMAIAAAAAAqOdI7+P8srvqCTFadwMM3iggaVOGcf1BB0EjBYeV6RAVsACAAAAAAU+V2GrqgxJYs9mxuak/8JMFICXwQ2vksrBdOvSwWFpoAAzQ2AH0AAAAFZAAgAAAAADKKe++fqh4sn0a8Bb+w3QMFnOqSE5hDI3zGQTcmJGcOBXMAIAAAAAC8ebHa++JmxVISv6LzjuMgEZqzKSZlJyujnSV9syRD9AVsACAAAAAAQcVNSjyetScLu78IrAYaAigerY4kWtnbctmIyb19Wa4AAzQ3AH0AAAAFZAAgAAAAAMKoHwhZcocaQy7asIuRG8+P1qPENgFAwzc3X1gZWYnJBXMAIAAAAAB+R01s+WdJjLa5p7STuEylradWr+2JDxsWx9bKDgXNDQVsACAAAAAADeXTBHsm+FH2pQVoqOBPPIJiTJLqrzGisNnQ3S3xYJAAAzQ4AH0AAAAFZAAgAAAAAF41XuyBvREKcxjDl+wbnillseykpAjCKHmwIu+RNvM7BXMAIAAAAAC2Wzq+2mfO7howoOZxquqvOuH1D2WdlzA1nK+LUp0FMgVsACAAAAAARha+D6DVeDxSjNyXXO5DMY+W70EGyfc7gxR4TjzcYusAAzQ5AH0AAAAFZAAgAAAAAAfONgdhLPEjvsMxTY9K4//7WjREuRmZ6Bpcf3yvdMf3BXMAIAAAAABCy/zjmzucxQkbJ96l5vS5x6SeyHE0Z+Aqp9oZgBcC6QVsACAAAAAAasG/uN4DnWHZLkLhH4cMzXk5F/HL2D+72WH+1jjgH8UAAzUwAH0AAAAFZAAgAAAAAA5ZsebFm5NrSGs2E17+fUt4qkzsVmy4IJA5nGehtSBVBXMAIAAAAAAOzteKfp+YGPqn1fi8u/lKXP7E2Zgouwgt6KAADHX9AQVsACAAAAAA2+FaAbl8JZogfNCI0FFbmZZPy/KLF1u16FGrPspSbEIAAzUxAH0AAAAFZAAgAAAAAHf6LIjrvy6I31w/8b910U9qU8cBIYiWn9mW55NYZF8VBXMAIAAAAACONPisRtnFG9vV2mTQ3hRR/hGuVRA9dGd9Lt9JqDoM8wVsACAAAAAA+h7V/jIYJcd0ALIvFBlwxkFqWxBVlkqT9wFkmumr4QcAAzUyAH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + encryptionInformation: *encryptionInformation + command_name: find + outcome: + collection: + data: + - + { + "_id": 0, + "encryptedInt": { $$type: "binData" }, + # Expected contents of `__safeContent__` require MONGOCRYPT-698 to apply expected `trimFactor`. 
+ "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + } + ] + } + - + { + "_id": { + "$numberInt": "1" + }, + "encryptedInt": { $$type: "binData" }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + } + ] + } diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-Aggregate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-Aggregate.yml.template similarity index 98% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-Aggregate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-Aggregate.yml.template index c70178396a..673eb6e039 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-Aggregate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-Aggregate.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -124,12 +122,6 @@ tests: "_id": 0, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -521,12 +513,6 @@ tests: "_id": 1, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-Correctness.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-Correctness.yml.template similarity index 98% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-Correctness.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-Correctness.yml.template index 1e708f837d..5461a72e0d 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-Correctness.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-Correctness.yml.template @@ -3,12 +3,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-Delete.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-Delete.yml.template similarity index 98% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-Delete.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-Delete.yml.template index 4242a4ac05..a32c7f38ad 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-Delete.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-Delete.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -126,12 +124,6 @@ tests: "_id": 0, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-FindOneAndUpdate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-FindOneAndUpdate.yml.template similarity index 98% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-FindOneAndUpdate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-FindOneAndUpdate.yml.template index 8ae1756f4f..033d07cf1c 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-FindOneAndUpdate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-FindOneAndUpdate.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -122,12 +120,6 @@ tests: "_id": 0, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -519,12 +511,6 @@ tests: "_id": 1, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-InsertFind.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-InsertFind.yml.template similarity index 98% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-InsertFind.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-InsertFind.yml.template index d709bc05c5..28549c6b12 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-InsertFind.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-InsertFind.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -118,12 +116,6 @@ tests: "_id": 0, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -515,12 +507,6 @@ tests: "_id": 1, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-Update.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-Update.yml.template similarity index 98% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-Update.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-Update.yml.template index bdb4a88d01..4c9dd4a3b4 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Double-Update.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Double-Update.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -135,12 +133,6 @@ tests: "_id": 0, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -532,12 +524,6 @@ tests: "_id": 1, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-Aggregate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-Aggregate.yml.template similarity index 95% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-Aggregate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-Aggregate.yml.template index ab3ba35953..8b6ab166e9 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-Aggregate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-Aggregate.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -124,12 +122,6 @@ tests: "_id": 0, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -227,12 +219,6 @@ tests: "_id": 1, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-Correctness.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-Correctness.yml.template similarity index 98% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-Correctness.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-Correctness.yml.template index 9357acae74..9d1b039d22 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-Correctness.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-Correctness.yml.template @@ -3,12 +3,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-Delete.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-Delete.yml.template similarity index 96% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-Delete.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-Delete.yml.template index ad5fc1a613..81e80bb6f6 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-Delete.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-Delete.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -126,12 +124,6 @@ tests: "_id": 0, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.yml.template similarity index 95% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.yml.template index 442450e452..4c4ad69f71 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -122,12 +120,6 @@ tests: "_id": 0, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -225,12 +217,6 @@ tests: "_id": 1, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-InsertFind.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-InsertFind.yml.template similarity index 95% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-InsertFind.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-InsertFind.yml.template index 3c065a184d..7157e847d5 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-InsertFind.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-InsertFind.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -118,12 +116,6 @@ tests: "_id": 0, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -221,12 +213,6 @@ tests: "_id": 1, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-Update.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-Update.yml.template similarity index 96% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-Update.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-Update.yml.template index 2e52f40af6..93e3328746 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-DoublePrecision-Update.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-DoublePrecision-Update.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -137,12 +135,6 @@ tests: "_id": 0, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -240,12 +232,6 @@ tests: "_id": 1, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-Aggregate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-Aggregate.yml.template similarity index 94% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-Aggregate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-Aggregate.yml.template index 15d979dc49..51fd98829c 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-Aggregate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-Aggregate.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -124,12 +122,6 @@ tests: "_id": 0, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -185,12 +177,6 @@ tests: "_id": 1, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-Correctness.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-Correctness.yml.template similarity index 98% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-Correctness.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-Correctness.yml.template index 6017dc1fa9..72b225fb1f 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-Correctness.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-Correctness.yml.template @@ -3,12 +3,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-Delete.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-Delete.yml.template similarity index 95% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-Delete.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-Delete.yml.template index a6058c4cbf..55804557d5 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-Delete.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-Delete.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -126,12 +124,6 @@ tests: "_id": 0, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-FindOneAndUpdate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-FindOneAndUpdate.yml.template similarity index 94% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-FindOneAndUpdate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-FindOneAndUpdate.yml.template index b8b7f4c6b7..b9381dd943 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-FindOneAndUpdate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-FindOneAndUpdate.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -122,12 +120,6 @@ tests: "_id": 0, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -183,12 +175,6 @@ tests: "_id": 1, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-InsertFind.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-InsertFind.yml.template similarity index 93% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-InsertFind.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-InsertFind.yml.template index 3bdfe9e826..a067f7e936 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-InsertFind.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-InsertFind.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -118,12 +116,6 @@ tests: "_id": 0, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -179,12 +171,6 @@ tests: "_id": 1, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-Update.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-Update.yml.template similarity index 94% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-Update.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-Update.yml.template index f0bb620324..26984cbc31 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Int-Update.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Int-Update.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -137,12 +135,6 @@ tests: "_id": 0, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -198,12 +190,6 @@ tests: "_id": 1, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-Aggregate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-Aggregate.yml.template similarity index 94% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-Aggregate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-Aggregate.yml.template index 8b40f4d05b..bfd89af2d1 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-Aggregate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-Aggregate.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -124,12 +122,6 @@ tests: "_id": 0, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -185,12 +177,6 @@ tests: "_id": 1, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-Correctness.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-Correctness.yml.template similarity index 98% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-Correctness.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-Correctness.yml.template index ef6bb9dd6c..8da4e0187b 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-Correctness.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-Correctness.yml.template @@ -3,12 +3,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-Delete.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-Delete.yml.template similarity index 95% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-Delete.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-Delete.yml.template index 46831726a8..e0dd0f5116 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-Delete.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-Delete.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -126,12 +124,6 @@ tests: "_id": 0, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-FindOneAndUpdate.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-FindOneAndUpdate.yml.template similarity index 94% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-FindOneAndUpdate.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-FindOneAndUpdate.yml.template index 88aed891d4..d144ebd27d 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-FindOneAndUpdate.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-FindOneAndUpdate.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -122,12 +120,6 @@ tests: "_id": 0, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -183,12 +175,6 @@ tests: "_id": 1, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-InsertFind.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-InsertFind.yml.template similarity index 93% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-InsertFind.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-InsertFind.yml.template index 077e41957b..d0fa60af12 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-InsertFind.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-InsertFind.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -118,12 +116,6 @@ tests: "_id": 0, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -179,12 +171,6 @@ tests: "_id": 1, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-Update.yml.template b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-Update.yml.template similarity index 94% rename from source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-Update.yml.template rename to source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-Update.yml.template index 55f7bfa036..617847082a 100644 --- a/source/client-side-encryption/etc/test-templates/fle2v2-Range-Long-Update.yml.template +++ b/source/client-side-encryption/etc/test-templates/fle2v2-Rangev2-Long-Update.yml.template @@ -1,12 +1,10 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -137,12 +135,6 @@ tests: "_id": 0, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -198,12 +190,6 @@ tests: "_id": 1, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/etc/test-templates/kmipKMS.yml.template b/source/client-side-encryption/etc/test-templates/kmipKMS.yml.template index a09291c6cf..35a259abd1 100644 --- a/source/client-side-encryption/etc/test-templates/kmipKMS.yml.template +++ b/source/client-side-encryption/etc/test-templates/kmipKMS.yml.template @@ -55,18 +55,12 @@ tests: arguments: document: &doc1 { _id: 1, encrypted_string_kmip_delegated: "string0" } expectations: - - command_started_event: - command: - listCollections: 1 - filter: - name: *collection_name - command_name: listCollections # Auto encryption will request the collection info. - command_started_event: command: listCollections: 1 filter: - name: datakeys + name: *collection_name command_name: listCollections # Then key is fetched from the key vault. 
- command_started_event: diff --git a/source/client-side-encryption/etc/test-templates/timeoutMS.yml.template b/source/client-side-encryption/etc/test-templates/timeoutMS.yml.template index 5ee838fabe..c98fac5bae 100644 --- a/source/client-side-encryption/etc/test-templates/timeoutMS.yml.template +++ b/source/client-side-encryption/etc/test-templates/timeoutMS.yml.template @@ -38,8 +38,10 @@ tests: command_name: listCollections # Test that timeoutMS applies to the sum of all operations done for client-side encryption. This is done by blocking - # listCollections and find for 20ms each and running an insertOne with timeoutMS=50. There should be two - # listCollections commands and one "find" command, so the sum should take more than timeoutMS. + # listCollections and find for 30ms each and running an insertOne with timeoutMS=50. There should be one + # listCollections command and one "find" command, so the sum should take more than timeoutMS. A second listCollections + # event doesn't occur due to the internal MongoClient lacking configured auto encryption, plus libmongocrypt holds the + # collection schema in cache for a minute. # # This test does not include command monitoring expectations because the exact command sequence is dependent on the # amount of time taken by mongocryptd communication. 
In slow runs, mongocryptd communication can breach the timeout @@ -47,11 +49,11 @@ tests: - description: "remaining timeoutMS applied to find to get keyvault data" failPoint: configureFailPoint: failCommand - mode: { times: 3 } + mode: { times: 2 } data: failCommands: ["listCollections", "find"] blockConnection: true - blockTimeMS: 20 + blockTimeMS: 30 clientOptions: autoEncryptOpts: kmsProviders: diff --git a/source/client-side-encryption/subtype6.md b/source/client-side-encryption/subtype6.md index 7ee1c55c57..bcdd1500f5 100644 --- a/source/client-side-encryption/subtype6.md +++ b/source/client-side-encryption/subtype6.md @@ -103,7 +103,7 @@ data into one BSON value that can be treated as an opaque blob in most contexts. If we used separate subtypes, we'd need to reserve three (and possibly more in the future) of our 124 remaining subtypes. - + ### Why are intent-to-encrypt markings needed? diff --git a/source/client-side-encryption/tests/README.md b/source/client-side-encryption/tests/README.md index 2877fbb964..01b693da34 100644 --- a/source/client-side-encryption/tests/README.md +++ b/source/client-side-encryption/tests/README.md @@ -2815,12 +2815,11 @@ This test is continuation of the case 1 and provides a way to complete inserting ### 22. Range Explicit Encryption -The Range Explicit Encryption tests require MongoDB server 7.0+. The tests must not run against a standalone. The tests -must be skipped on MongoDB server 8.0+. +The Range Explicit Encryption tests require MongoDB server 8.0+. > [!NOTE] -> MongoDB Server 8.0 introduced a backwards breaking change to the Queryable Encryption (QE) range protocol: QE Range -> V2. Skip tests using `rangePreview` when using Server 8.0 or newer until DRIVERS-2767 is addressed. +> MongoDB Server 8.0 introduced a backwards breaking change to the Queryable Encryption (QE) range protocol: QE Range V2 +> libmongocrypt 1.10.0 is required to use the QE Range V2. 
> [!NOTE] > MongoDB Server 7.0 introduced a backwards breaking change to the Queryable Encryption (QE) protocol: QEv2. @@ -2886,7 +2885,7 @@ Encrypt using the following `EncryptOpts`: ```typescript class EncryptOpts { keyId : , - algorithm: "RangePreview", + algorithm: "Range", contentionFactor: 0, rangeOpts: , } @@ -2913,6 +2912,7 @@ skipped. ```typescript class RangeOpts { + trimFactor: 1, sparsity: 1, } ``` @@ -2923,6 +2923,7 @@ skipped. class RangeOpts { min: { "$numberDecimal": "0" }, max: { "$numberDecimal": "200" }, + trimFactor: 1, sparsity: 1, precision: 2, } @@ -2932,6 +2933,7 @@ skipped. ```typescript class RangeOpts { + trimFactor: 1 sparsity: 1, } ``` @@ -2942,6 +2944,7 @@ skipped. class RangeOpts { min: { "$numberDouble": "0" }, max: { "$numberDouble": "200" }, + trimFactor: 1, sparsity: 1, precision: 2, } @@ -2953,6 +2956,7 @@ skipped. class RangeOpts { min: {"$date": { "$numberLong": "0" } } , max: {"$date": { "$numberLong": "200" } }, + trimFactor: 1, sparsity: 1, } ``` @@ -2963,6 +2967,7 @@ skipped. class RangeOpts { min: {"$numberInt": "0" } , max: {"$numberInt": "200" }, + trimFactor: 1, sparsity: 1, } ``` @@ -2973,6 +2978,7 @@ skipped. 
class RangeOpts { min: {"$numberLong": "0" } , max: {"$numberLong": "200" }, + trimFactor: 1, sparsity: 1, } ``` @@ -2987,7 +2993,7 @@ Encrypt using the following `EncryptOpts`: ```typescript class EncryptOpts { keyId : , - algorithm: "RangePreview", + algorithm: "Range", contentionFactor: 0, rangeOpts: , } @@ -3016,8 +3022,8 @@ Encrypt using the following `EncryptOpts`: ```typescript class EncryptOpts { keyId : , - algorithm: "RangePreview", - queryType: "rangePreview", + algorithm: "Range", + queryType: "range", contentionFactor: 0, rangeOpts: , } @@ -3051,8 +3057,8 @@ Encrypt using the following `EncryptOpts`: ```typescript class EncryptOpts { keyId : , - algorithm: "RangePreview", - queryType: "rangePreview", + algorithm: "Range", + queryType: "range", contentionFactor: 0, rangeOpts: , } @@ -3085,8 +3091,8 @@ Encrypt using the following `EncryptOpts`: ```typescript class EncryptOpts { keyId : , - algorithm: "RangePreview", - queryType: "rangePreview", + algorithm: "Range", + queryType: "range", contentionFactor: 0, rangeOpts: , } @@ -3118,8 +3124,8 @@ Encrypt using the following `EncryptOpts`: ```typescript class EncryptOpts { keyId : , - algorithm: "RangePreview", - queryType: "rangePreview", + algorithm: "Range", + queryType: "range", contentionFactor: 0, rangeOpts: , } @@ -3150,7 +3156,7 @@ Encrypt using the following `EncryptOpts`: ```typescript class EncryptOpts { keyId : , - algorithm: "RangePreview", + algorithm: "Range", contentionFactor: 0, rangeOpts: , } @@ -3172,7 +3178,7 @@ Encrypt using the following `EncryptOpts`: ```typescript class EncryptOpts { keyId : , - algorithm: "RangePreview", + algorithm: "Range", contentionFactor: 0, rangeOpts: , } @@ -3197,10 +3203,95 @@ Encrypt using the following `EncryptOpts`: ```typescript class EncryptOpts { keyId : , - algorithm: "RangePreview", + algorithm: "Range", contentionFactor: 0, rangeOpts: , } ``` Assert that an error was raised. + +### 22. 
Range Explicit Encryption applies defaults + +This test requires libmongocrypt with changes in +[14ccd9ce](https://github.com/mongodb/libmongocrypt/commit/14ccd9ce8a030158aec07f63e8139d34b95d88e6) +([MONGOCRYPT-698](https://jira.mongodb.org/browse/MONGOCRYPT-698)). + +#### Test Setup + +Create a MongoClient named `keyVaultClient`. + +Create a ClientEncryption object named `clientEncryption` with these options: + +```typescript +class ClientEncryptionOpts { + keyVaultClient: keyVaultClient, + keyVaultNamespace: "keyvault.datakeys", + kmsProviders: { "local": { "key": "" } }, +} +``` + +Create a key with `clientEncryption.createDataKey`. Store the returned key ID in a variable named `keyId`. + +Call `clientEncryption.encrypt` to encrypt the int32 value `123` with these options: + +```typescript +class EncryptOpts { + keyId : keyId, + algorithm: "Range", + contentionFactor: 0, + rangeOpts: RangeOpts { + min: 0, + max: 1000 + } +} +``` + +Store the result in a variable named `payload_defaults`. + +#### Case 1: Uses libmongocrypt defaults + +Call `clientEncryption.encrypt` to encrypt the int32 value `123` with these options: + +```typescript +class EncryptOpts { + keyId : keyId, + algorithm: "Range", + contentionFactor: 0, + rangeOpts: RangeOpts { + min: 0, + max: 1000, + sparsity: 2, + trimFactor: 6 + } +} +``` + +Assert the returned payload size equals the size of `payload_defaults`. + +> [!NOTE] +> Do not compare the payload contents. The payloads include random data. The `trimFactor` and `sparsity` directly affect +> the payload size. + +#### Case 2: Accepts `trimFactor` 0 + +Call `clientEncryption.encrypt` to encrypt the int32 value `123` with these options: + +```typescript +class EncryptOpts { + keyId : keyId, + algorithm: "Range", + contentionFactor: 0, + rangeOpts: RangeOpts { + min: 0, + max: 1000, + trimFactor: 0 + } +} +``` + +Assert the returned payload size is greater than the size of `payload_defaults`. + +> [!NOTE] +> Do not compare the payload contents. 
The payloads include random data. The `trimFactor` and `sparsity` directly affect +> the payload size. diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Aggregate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Aggregate.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Aggregate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Aggregate.json index 9eaabe0d71..63a2db3ef1 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Aggregate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Aggregate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -226,10 +228,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -283,10 +288,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -346,10 +354,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -383,12 +394,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": 
"5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -445,12 +450,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Aggregate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Aggregate.yml similarity index 93% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Aggregate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Aggregate.yml index c0f6179445..9f36eec0ad 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Aggregate.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Aggregate.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDate', 'bsonType': 'date', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$date': {'$numberLong': '0'}}, 'max': {'$date': {'$numberLong': '200'}}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDate', 'bsonType': 'date', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$date': {'$numberLong': '0'}}, 'max': {'$date': {'$numberLong': '200'}}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Date. Aggregate." 
@@ -124,12 +122,6 @@ tests: "_id": 0, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -185,12 +177,6 @@ tests: "_id": 1, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Correctness.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Correctness.json similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Correctness.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Correctness.json index fa887e0892..fae25a1c02 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Correctness.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Correctness.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Correctness.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Correctness.yml similarity index 98% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Correctness.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Correctness.yml index 
49f66ae285..f7ed9fb93f 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Correctness.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Correctness.yml @@ -3,16 +3,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDate', 'bsonType': 'date', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$date': {'$numberLong': '0'}}, 'max': {'$date': {'$numberLong': '200'}}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDate', 'bsonType': 'date', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$date': {'$numberLong': '0'}}, 'max': {'$date': {'$numberLong': '200'}}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': 
{'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "Find with $gt" diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Delete.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Delete.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Delete.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Delete.json index cce4faf188..63a2b29fcc 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Delete.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Delete.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -215,10 +217,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -272,10 +277,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -336,10 +344,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -373,12 +384,6 @@ "$$type": "binData" }, "__safeContent__": [ - 
{ - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Delete.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Delete.yml similarity index 93% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Delete.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Delete.yml index 689d93a716..17f954e045 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Delete.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Delete.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDate', 'bsonType': 'date', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$date': {'$numberLong': '0'}}, 'max': {'$date': {'$numberLong': '200'}}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDate', 'bsonType': 'date', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$date': {'$numberLong': '0'}}, 'max': {'$date': {'$numberLong': '200'}}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Date. Delete." 
@@ -126,12 +124,6 @@ tests: "_id": 0, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-FindOneAndUpdate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Date-FindOneAndUpdate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json index 4392b67686..049186c869 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-FindOneAndUpdate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -230,10 +232,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -287,10 +292,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -352,10 +360,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": 
"range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -389,12 +400,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -451,12 +456,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-FindOneAndUpdate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.yml similarity index 93% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Date-FindOneAndUpdate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.yml index 69418e441c..99ed076aa7 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-FindOneAndUpdate.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDate', 'bsonType': 'date', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$date': {'$numberLong': '0'}}, 'max': {'$date': {'$numberLong': '200'}}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDate', 'bsonType': 'date', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$date': {'$numberLong': '0'}}, 'max': {'$date': {'$numberLong': '200'}}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Date. FindOneAndUpdate." 
@@ -122,12 +120,6 @@ tests: "_id": 0, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -183,12 +175,6 @@ tests: "_id": 1, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-InsertFind.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-InsertFind.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Date-InsertFind.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-InsertFind.json index 27ce7881df..d0751434b5 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-InsertFind.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-InsertFind.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -222,10 +224,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -279,10 +284,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", 
"contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -337,10 +345,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -374,12 +385,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -436,12 +441,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-InsertFind.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-InsertFind.yml similarity index 92% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Date-InsertFind.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-InsertFind.yml index 9ad57efa71..c55ba9eeee 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-InsertFind.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-InsertFind.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDate', 'bsonType': 'date', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$date': {'$numberLong': '0'}}, 'max': {'$date': {'$numberLong': '200'}}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDate', 'bsonType': 'date', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$date': {'$numberLong': '0'}}, 'max': {'$date': {'$numberLong': '200'}}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Date. Insert and Find." 
@@ -118,12 +116,6 @@ tests: "_id": 0, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -179,12 +171,6 @@ tests: "_id": 1, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Update.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Update.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Update.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Update.json index f7d5a6af66..1e7750feeb 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Update.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Update.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -226,10 +228,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -283,10 +288,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": 
"0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -354,10 +362,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -391,12 +402,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -453,12 +458,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Update.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Update.yml similarity index 93% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Update.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Update.yml index 2dd35dfaa6..f81d20a178 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Date-Update.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Date-Update.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDate', 'bsonType': 'date', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$date': {'$numberLong': '0'}}, 'max': {'$date': {'$numberLong': '200'}}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDate', 'bsonType': 'date', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$date': {'$numberLong': '0'}}, 'max': {'$date': {'$numberLong': '200'}}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Date. Update." 
@@ -135,12 +133,6 @@ tests: "_id": 0, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -196,12 +188,6 @@ tests: "_id": 1, "encryptedDate": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Aggregate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Aggregate.json similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Aggregate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Aggregate.json index 401ee34e3f..5f573a933d 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Aggregate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Aggregate.json @@ -1,11 +1,10 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -23,10 +22,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -206,10 +208,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -253,10 +258,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", 
+ "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -306,10 +314,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -335,12 +346,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -1119,12 +1124,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Aggregate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Aggregate.yml similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Aggregate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Aggregate.yml index 4debfefc80..c0bbff5900 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Aggregate.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Aggregate.yml @@ -1,17 +1,15 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. 
topology: [ "replicaset" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalNoPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalNoPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Decimal. Aggregate." 
@@ -127,12 +125,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -910,12 +902,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Correctness.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Correctness.json similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Correctness.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Correctness.json index 758d3e5732..4316a31c3e 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Correctness.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Correctness.json @@ -1,11 +1,10 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -23,10 +22,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Correctness.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Correctness.yml similarity index 97% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Correctness.yml rename to 
source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Correctness.yml index 4eef897c4a..8154624799 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Correctness.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Correctness.yml @@ -3,17 +3,15 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalNoPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalNoPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 
'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "Find with $gt" diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Delete.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Delete.json similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Delete.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Delete.json index 24a08f318c..a94dd40fee 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Delete.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Delete.json @@ -1,11 +1,10 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -23,10 +22,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -197,10 +199,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -244,10 +249,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, 
"sparsity": { "$numberLong": "1" } @@ -298,10 +306,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -327,12 +338,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Delete.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Delete.yml similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Delete.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Delete.yml index aad79c5459..ca9c58f92a 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Delete.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Delete.yml @@ -1,17 +1,15 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalNoPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalNoPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Decimal. Delete." 
@@ -129,12 +127,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json index 2a8070ecf9..5226facfb6 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json @@ -1,11 +1,10 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -23,10 +22,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -208,10 +210,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -255,10 +260,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -310,10 +318,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", 
"queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -339,12 +350,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -1123,12 +1128,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.yml similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.yml index d71ba28c5b..2869767add 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.yml @@ -1,17 +1,15 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalNoPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalNoPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Decimal. FindOneAndUpdate." 
@@ -125,12 +123,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -908,12 +900,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-InsertFind.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-InsertFind.json similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-InsertFind.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-InsertFind.json index 2ef63f42b9..b6615454bd 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-InsertFind.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-InsertFind.json @@ -1,11 +1,10 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -23,10 +22,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -202,10 +204,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -249,10 +254,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - 
"queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -297,10 +305,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -326,12 +337,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -1110,12 +1115,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-InsertFind.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-InsertFind.yml similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-InsertFind.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-InsertFind.yml index 9e70ff9728..69c58a7738 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-InsertFind.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-InsertFind.yml @@ -1,17 +1,15 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. 
topology: [ "replicaset" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalNoPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalNoPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Decimal. Insert and Find." 
@@ -121,12 +119,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -904,12 +896,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Update.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Update.json similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Update.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Update.json index 8064eb1b18..ceef8ca9ba 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Update.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Update.json @@ -1,11 +1,10 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -23,10 +22,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -206,10 +208,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -253,10 +258,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + 
"queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -314,10 +322,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -343,12 +354,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -1127,12 +1132,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Update.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Update.yml similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Update.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Update.yml index f06c13a4eb..32e93c41da 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Decimal-Update.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Decimal-Update.yml @@ -1,17 +1,15 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. 
topology: [ "replicaset" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalNoPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalNoPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Decimal. Update." 
@@ -138,12 +136,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -921,12 +913,6 @@ tests: }, "encryptedDecimalNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json similarity index 96% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json index 8cf143c094..35cc4aba87 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -217,10 +219,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -273,10 +278,13 @@ "path": 
"encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -335,10 +343,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -373,12 +384,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -479,12 +484,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Aggregate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.yml similarity index 94% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Aggregate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.yml index 43f1df6864..79d5267b01 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Aggregate.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDecimal': '0.0'}, 'max': {'$numberDecimal': '200.0'}, 'precision': {'$numberInt': '2'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDecimal': '0.0'}, 'max': {'$numberDecimal': '200.0'}, 'precision': {'$numberInt': '2'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range DecimalPrecision. Aggregate." 
@@ -126,12 +124,6 @@ tests: }, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -231,12 +223,6 @@ tests: }, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Correctness.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.json similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Correctness.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.json index a4b06998f7..8954445887 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Correctness.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Correctness.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.yml similarity index 97% rename from 
source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Correctness.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.yml index c4b037bde8..cd1ced0b8f 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Correctness.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.yml @@ -3,16 +3,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDecimal': '0.0'}, 'max': {'$numberDecimal': '200.0'}, 'precision': {'$numberInt': '2'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDecimal': '0.0'}, 'max': {'$numberDecimal': '200.0'}, 'precision': {'$numberInt': '2'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 
'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "Find with $gt" diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Delete.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json similarity index 96% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Delete.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json index fad8234838..e000c40589 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Delete.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -208,10 +210,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -264,10 +269,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", 
"contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -327,10 +335,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -365,12 +376,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Delete.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.yml similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Delete.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.yml index cb10767df1..7c3b7623be 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Delete.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDecimal': '0.0'}, 'max': {'$numberDecimal': '200.0'}, 'precision': {'$numberInt': '2'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDecimal': '0.0'}, 'max': {'$numberDecimal': '200.0'}, 'precision': {'$numberInt': '2'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range DecimalPrecision. Delete." 
@@ -128,12 +126,6 @@ tests: }, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json similarity index 96% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json index fb8f4f4140..27f10a30a7 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -219,10 +221,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -275,10 +280,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -339,10 
+347,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -377,12 +388,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -483,12 +488,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.yml similarity index 94% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.yml index 2c67b36381..2bf6c16866 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDecimal': '0.0'}, 'max': {'$numberDecimal': '200.0'}, 'precision': {'$numberInt': '2'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDecimal': '0.0'}, 'max': {'$numberDecimal': '200.0'}, 'precision': {'$numberInt': '2'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range DecimalPrecision. FindOneAndUpdate." 
@@ -124,12 +122,6 @@ tests: }, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -229,12 +221,6 @@ tests: }, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json similarity index 96% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json index 79562802e6..5fb96730d6 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -213,10 +215,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -269,10 +274,13 @@ "path": 
"encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -326,10 +334,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -362,12 +373,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -466,12 +471,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-InsertFind.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.yml similarity index 94% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-InsertFind.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.yml index f01401718d..5bde559848 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-InsertFind.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDecimal': '0.0'}, 'max': {'$numberDecimal': '200.0'}, 'precision': {'$numberInt': '2'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDecimal': '0.0'}, 'max': {'$numberDecimal': '200.0'}, 'precision': {'$numberInt': '2'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range DecimalPrecision. Insert and Find." 
@@ -118,12 +116,6 @@ tests: "_id": 0, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -221,12 +213,6 @@ tests: "_id": 1, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Update.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json similarity index 96% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Update.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json index cc93b76948..f67ae3ca23 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Update.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -217,10 +219,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -273,10 +278,13 @@ "path": 
"encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -343,10 +351,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -379,12 +390,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -483,12 +488,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Update.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Update.yml similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Update.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Update.yml index 22beb93e94..75763dcb60 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DecimalPrecision-Update.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DecimalPrecision-Update.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDecimal': '0.0'}, 'max': {'$numberDecimal': '200.0'}, 'precision': {'$numberInt': '2'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDecimalPrecision', 'bsonType': 'decimal', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDecimal': '0.0'}, 'max': {'$numberDecimal': '200.0'}, 'precision': {'$numberInt': '2'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range DecimalPrecision. Update." 
@@ -135,12 +133,6 @@ tests: "_id": 0, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -238,12 +230,6 @@ tests: "_id": 1, "encryptedDecimalPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Defaults.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Defaults.json new file mode 100644 index 0000000000..f32c8387ef --- /dev/null +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Defaults.json @@ -0,0 +1,381 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": 
"1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range applies defaults for trimFactor and sparsity", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, 
+ "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DfQaAAADcGF5bG9hZADEGgAABGcAsBoAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAA30oqY6NKy1KWDWf6Z36DtA2QsL9JRALvHX6smxz8cb4FcwAgAAAAADIhM0hCHwFGH+k7kPGuZlO+v5TjV6RRwA5FqUKM60o0BWwAIAAAAABTMPNUweBKrILSCxc5gcgjn9pTkkKX7KqWXgNMk4q7XgADMgB9AAAABWQAIAAAAACnCDvYEbgR9fWeQ8SatKNX43p0XIXTyFfzc7/395V2swVzACAAAAAAp8pkn2wJrZRBLlD18oE1ZRRiujmtFtuHYTZDzdGNE4kFbAAgAAAAAE2eptD2Jp126h5cd7S6k8IjRB6QJhuuWzPU/SEynDXTAAMzAH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzQAfQAAAAVkACAAAAAA8Ci9z02yMVsDNyHvLStLAHR25LO22UO5P/gbUG/IStQFcwAgAAAAAOdfFhaFVq1JPr3dIeLm1EYKWgceZ7hZ5FJT5u/lL/I+BWwAIAAAAADqUyU1hSFDLCmqsz2dhPhefzCShUV/Z2x+4P9xcGw8rwADNQB9AAAABWQAIAAAAAD3g2atCWYVOXW0YbCbvIturqNIAsy210bkL9KmqVMlAAVzACAAAAAAVGEb7L0QCjV/PBTAvUyhlddo467ToKjlMdwI9hsjuE4FbAAgAAAAAJe0bDhUH1sZldnDGWn0xMa1CQuN6cgv/i/6XqnpPS39AAM2AH0AAAAFZAAgAAAAANQOKUE9FOmCoMva2IYg45LZXJX0cMpUR1OvIwFmjLDYBXMAIAAAAAB6dyIKkQ86l/8j8zeWcDYeVGRYKd0USz6To3LbOBAKsAVsACAAAAAAELK0ExI0g4/WxNs+mf+Ua+mie3MuMO3daPGukA23VUYAAzcAfQAAAAVkACAAAAAARQp+fGA08v1bhcnYbfsP0ubXl9yg18QmYMfh2sd8EdEFcwAgAAAAABhe79wEznE298tt02xyRF7bk7a2NH9kwVg1TPY5/lT1BWwAIAAAAAADiGV5f/RRPkwpSrZMGHNBSarmwyqV+SYXI73QW/PmnwADOAB9AAAABWQAIAAAAABnW3CpmSFTglPNKYHJHhJHC/vd5BMWQpztIXQBL0sCngVzACAAAAAAC21qRBu2Px7VUz1lW95Dfn/0tw2yq9AVBtka34HijLgFbAAgAAAAAP8S1s5OA5cJT6ILpA94LanuLsSl9BsRCWHBtufFTMVrAAM5AH0AAAAFZAAgAAAAAJRIWu6DI2LR+2Pi09OaBZEmS2FInyBnGs9wf9Jf2wiIBXMAIAAAAABoDqKzj11qyOfXl4dcfkmGHqZxXyAsnGlgA9wsJRWWUQVsACAAAAAAIsDousyo/D8e4BCwUqvFhrKtOnpcGCSqpN94oFtWaC0AAzEwAH0AAAAFZAAgAAAAAE0h7vfdciFBeqIk1N14ZXw/jzFT0bLfXcNyiPRsg4W4BXMAIAAAAAB0Kbvm3VLBphtd8/OpgNuJtJaJJLhHBCKZJJeK+GcthAVsACAAAAAAKfjHp8xww1JDjzyjTnfamOvjFDc1Z3Hp/v/ZuQnFOOEAAzExAH0AAAAFZAAgAAAAACL9+rQRyywIXa5Pr7g2SnB0s0EjIct7PQtzjEkA69acBXMAIAAAAADz54imCCbu/qQ
kYP9wW2f5pHoBS+EyCe+xuDwC0UTiYgVsACAAAAAAKv602j4c3Bpn2t10qGl68eAD/fQsIH5lKMj8ANwrf7oAAzEyAH0AAAAFZAAgAAAAAKTK0NLhQ/+Y/HMxjRwBlXpXJAhAmCoWf1fReTegPnVpBXMAIAAAAAD7AlW+P4FfQS4r8d7EEvPVEP1diSbrVDBqg8ZvNl1XRAVsACAAAAAATTSEkff+/JMBjNwUciY2RQ6M66uMQMAtwU+UidDv1y4AAzEzAH0AAAAFZAAgAAAAAGMbgPxi2Wu1AlqoDKTgyBnCZlnCjHm2naxRcizkIbYJBXMAIAAAAADMvSM3VZzVyRFCfUvcLXAXQFRIxlhm0t0dUsnaRZG4hgVsACAAAAAAI7uGriMAQc4A/a70Yi1Y7IAC7o/mfNYf7/FvwELYf80AAzE0AH0AAAAFZAAgAAAAAPnZ1bdmrcX0fsSxliuSqvDbRqwIiVg0tYp0PViRX0nOBXMAIAAAAAAqBdZGg9O74mnwyQF+lILtyzHdLOErDjPSf9sM8EqCugVsACAAAAAAwhuDsz+fCtqY8mW8QvEVQERjDChwrYTw4y7dinlCCOMAAzE1AH0AAAAFZAAgAAAAAJ40Dmb5BUT1AlWjfXB43nIbJgDn9rBg9FAeYR80WK0vBXMAIAAAAAAMPqLMDdNmnKzA3Hq49/NkJfs+/cjnyjSAbmiOFUE5FgVsACAAAAAAxbi7ql49Y4pduqWlLJqpwimRzrEnC7w5fWaMBiinHL8AAzE2AH0AAAAFZAAgAAAAAGelnhqWM2gUVy4P5QE/2Zfd7s9BugPqB/tcnSsFg5X0BXMAIAAAAAAWUhif3G+NMvZ3YPLB5OMuIhfPEu6U8KR9gTvJFz5uIwVsACAAAAAADEs8/aVSj2sJjxjv1K7o/aH8vZzt1bga73YiIKUx5DYAAzE3AH0AAAAFZAAgAAAAAD1xX2wCyf1aK1MoXnBAPfWLeBxsJI2i06tWbuiYKgElBXMAIAAAAACW1NW4RibvY0JRUzPvCmKnVbEy8AIS70fmsY08WgJOEgVsACAAAAAAQq9eIVoLcd4WxXUC3vub+EnxmcI2uP/yUWr3cz0jv9EAAzE4AH0AAAAFZAAgAAAAAHwU1LYeJmTch640sTu3VRRRdQg4YZ7S9IRfVXWHEWU8BXMAIAAAAACozWKD2YlqbQiBVVwJKptfAVM+R2FPJPtXkxVFAhHNXQVsACAAAAAAn7LS0QzTv9sOJzxH0ZqxsLYBYoArEo/PIXkU/zTnpM0AAzE5AH0AAAAFZAAgAAAAAHKaToAsILpmJyCE02I1iwmF/FibqaOb4b5nteuwOayfBXMAIAAAAABPxYjSK5DKgsdUZrZ+hM6ikejPCUK6Rqa0leoN7KOM0QVsACAAAAAAH9rPq5vvOIe9nTAcM1W1dVhQZ+gSkBohgoWLPcZnQXcAAzIwAH0AAAAFZAAgAAAAANTGiHqJVq28n7mMZsJD6gHxVQp1A6z8wgZVW+xV/lhmBXMAIAAAAABCR4BfdNVy7WE+IyQ312vYuIW0aGcXxr2II/MbNz8ZdAVsACAAAAAAng0GYpYJTypRLQUd5tIXWaAjZX5na04T/BypmwwrXPoAAzIxAH0AAAAFZAAgAAAAABooumzjEqp9Hvvd+sn1L82NI2iUGRl0nXQNJTHM7oyVBXMAIAAAAADgjz5L2ursK4C+pXXsJ6XHABhyallj9s/vSUgxXvjiiwVsACAAAAAAPjlAM0tbO6EUmLAeIZt57YMkMsuQfuC3T3d9vtnxgjwAAzIyAH0AAAAFZAAgAAAAAMA4jmE8U2uGkYUeKoYSlb22tfrRq2VlhV1Jq1kn4hV9BXMAIAAAAADG4fLeJUcINPSb1pMfAASJkuYsgS/59Eq/51mET/Y7RQVsACAAAAAAmwwcWOnzvpxm4pROXOL+BlxjEG/7v7hIautb2ubFT44AAzIzAH0AAAAFZAAgAAAAAK8
/E3VHzHM6Kjp39GjFy+ci1IiUG5oxh0W6elV+oiX2BXMAIAAAAAA4/F4Q94xxb2TvZcMcji/DVTFrZlH8BL/HzD86RRmqNAVsACAAAAAAif3HPf6B1dTX/W+Vlp6ohadEQk/GAmHYzXfJia2zHeIAAzI0AH0AAAAFZAAgAAAAAGUX9ttLN1cCrOjlzsl/E6jEzQottNDw8Zo94nbO1133BXMAIAAAAAA7uVthFvXH+pbBrgQmnkPcpiHFEVCAi0WA7sAt9tlt3gVsACAAAAAAznaMStSbtGXU1Pb5z9KDTvEd79s6gmWYCKOKdzeijpEAAzI1AH0AAAAFZAAgAAAAAKnT/qg8N85Q9EQvpH7FBqUooxHFgrIjqLlIDheva2QSBXMAIAAAAABGAKkFMKoSIrvClWF7filoYM6fI9xSqOJVNS3dv4lxYwVsACAAAAAAgITE31hQA4ZOxpUFYSYv0mzWbd/6RKgbUXiUY96fBQEAAzI2AH0AAAAFZAAgAAAAAHRDRDT2hJrJ8X9zB9ELT28q8ZsfkYr92chaZYakiLlqBXMAIAAAAAAT0Le67ObldDta/Qb17dYfdslPsJTfGj3bWAgC0JIingVsACAAAAAAMGDrqys8iJ3fCT2Cj+zXIuXtsf4OAXWJl5HoPUMlbNoAAzI3AH0AAAAFZAAgAAAAAOOJcUjYOE0KqcYS1yZ363zglQXfr3XSD+R5fWLSivDoBXMAIAAAAABjeLe+tg37lNa+DdVxtlCtY77tV9PqfJ5X4XEKrfwu0AVsACAAAAAAlbpHiQAPLLTvSF+u58RBCLnYQKB5wciIQmANV9bkzsoAAzI4AH0AAAAFZAAgAAAAAMwWOOaWDDYUusdA1nyoaEB3C4/9GRpFNGags95Ddp4LBXMAIAAAAACLrsQXGWK15fW4mPEUXJ/90by13aG+727qWJep8QJ/WgVsACAAAAAAuThwsAsKUB56QAXC0MjJsZ9736atbiHPlK2tE0urf9QAAzI5AH0AAAAFZAAgAAAAABPRXBK0z8UANcvMDWntBjN9yF7iGMPLbhbaKrvHwcplBXMAIAAAAACZlqWsYPIb+ydmH03BxD3TqSGsSNoI7EVCy0VgW0TpYgVsACAAAAAAD2uaBv8oc7l4EeC5PWx5sfeyGZoas0JdFJ33M3jjgjMAAzMwAH0AAAAFZAAgAAAAAOn9/6pbzjIxFEApugaVOvVKXq23sDCJELv5UtLPDZI3BXMAIAAAAACHIwSDTlof0vFoigF4drbeM/8rdlj/4U386zQsNLtPGwVsACAAAAAAsYt/rXnpL55J9rlWSFRA4seaU6ggix7RgxbrJPu6gO4AAzMxAH0AAAAFZAAgAAAAAIMCESykv5b5d6mYjU5DlnO709lOFCaNoJBLtzBIqmg4BXMAIAAAAADs1Bfuaun4Es3nQ4kr29BzheLRDcFv+9a0gOGkSEcrDgVsACAAAAAA5kW6i/jOBSdoGAsZEZxVNRvt6miv86bP8JfUT+1KJg8AAzMyAH0AAAAFZAAgAAAAAFSPmr27XgKhUkbEvvC6Br5K1w7280NZrrhdzfYF+YGjBXMAIAAAAADv2h+Xq6kM7MHYTLMACRwbe2MzGHu4sdB67FGzDR6H4QVsACAAAAAAKII0MMC7o6GKVfGo2qBW/p35NupBp7MI6Gp0zXYwJOcAAzMzAH0AAAAFZAAgAAAAAPSV9qprvlNZK6OSQZNxKhJmBMs6QCKFESB/oeIvAS0iBXMAIAAAAAA835Jh22/pvZgKoYH6KjE+RRpYkaM1G35TWq6uplk/rgVsACAAAAAA162IdSb079yVlS7GkuSdHU3dOw03a+NS55ZPVBxbD08AAzM0AH0AAAAFZAAgAAAAAGsadEBJFax/UltPXB86G/YPxo6h353ZT+rC62iGy7qqBXMAIAAAAADs9TP3h91f6bTuG8QCQMA3atAVGs8k0ZjVzX3pM8HNAgVsACAAAAAA2ed
4R4wYD6DT0P+N6o3gDJPE0DjljbRAv5vme3jb42sAAzM1AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzM2AH0AAAAFZAAgAAAAAKJY+8+7psFzJb5T+Mg9UWb6gA9Y8NN9j/ML2jZkNDNPBXMAIAAAAAA2R/nCtSYfCim89BzdUPS+DTQGwYDk+2ihFPEBS8h+ygVsACAAAAAAaEQra7xyvA3JS0BasIpRVrz7ZXsp6RpH7OpfJBFzFG8AAzM3AH0AAAAFZAAgAAAAAI4qr+sJiRaqwZRhnenAzD7tTKq+jP1aaLyAln3w1HQuBXMAIAAAAADNYpqV73NpwN+Ta0ms1SRiu+6WNOOdGT+syghL+JAFhQVsACAAAAAAN07Fo9SK+fXp5Odk1J806pyVWc2WHXCtb1gJQknTgqsAAzM4AH0AAAAFZAAgAAAAAISgN1Hid7IWvDESN/3tywFZiBsZPYapOUx9/QjDDxLfBXMAIAAAAAA7lxpEz3+CGdv6/WKIAlIwRYURREKgn7+StwNoVekkDwVsACAAAAAAx+Oa2v1e1R7VomfsvcKO8VkY4eTl7LzjNQQL6Cj6GBQAAzM5AH0AAAAFZAAgAAAAAOTLdk1RIUzCsvK7xCXy+LxGhJf87fEL406U9QKta3JRBXMAIAAAAAD8+6UnUn8sN6AgQuuf7uFxW+2ZJNpZLgp3eKVtjbo9ewVsACAAAAAAQN3mZHmaDM0ZbUnk2O/+wCUjiCs4bnshfHjd/4ygLXcAAzQwAH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzQxAH0AAAAFZAAgAAAAAPLX4XT1eMfokMvj73G6loHEotbdivVFM6cpMbU0zIOmBXMAIAAAAABuTqwm6E60kVBN5iClzLnMBozIQRYjMozzRNKVhixkEAVsACAAAAAAjvY9G0Of8EQcZ4GVfSEVz7jrNn7i4qps2r82jJmngKoAAzQyAH0AAAAFZAAgAAAAAGzGJAUZBcVKRb4bCSNaRxtcDH2TqIgHqMElD9RL7SzDBXMAIAAAAABbJfrLwBrqZ2Ylm9QfL7nkW+GJ8vTlaeMUDT5620ebaAVsACAAAAAASiaS1IlBls5Tan57XqqbR1cuvyOcoSibJJQGREzm4c0AAzQzAH0AAAAFZAAgAAAAAC028abAppwE/ApZHU5RbzZZ8OPD5eJ8/6+NgiSFf4d+BXMAIAAAAAD3THvDUYWULR+AVLuRRPPAMVMeZ2ldWpBYSODboszWbQVsACAAAAAAATOaeYj+kx3MTDeNUcKGbUxLZDeMjC8JrWnlHmWTamQAAzQ0AH0AAAAFZAAgAAAAAHWr8wQYIKLiKeb3wd8kZQuXD/GUHDqXj12K/EQWV11CBXMAIAAAAADo3aFHDuyfls9tcWCxlFqJn4zDXd3WT9CIFYFjJnTYswVsACAAAAAAeMbIatR7DgefzuvF4WyNVDjJxP8KPA6U/rmMQIBvpM0AAzQ1AH0AAAAFZAAgAAAAAMdRi6AAjF1Z9ucMqYl2Ud1PLUGOlOPJFgSrPTjs27u8BXMAIAAAAAAqOdI7+P8srvqCTFadwMM3iggaVOGcf1BB0EjBYeV6RAVsACAAAAAAU+V2GrqgxJYs9mxuak/8JMFICXwQ2vksrBdOvSwWFpoAAzQ2AH0AAAAFZAAgAAAAADKKe++fqh4sn0a8Bb+w3QMFnOqSE5hDI3zGQTcmJGcOBXM
AIAAAAAC8ebHa++JmxVISv6LzjuMgEZqzKSZlJyujnSV9syRD9AVsACAAAAAAQcVNSjyetScLu78IrAYaAigerY4kWtnbctmIyb19Wa4AAzQ3AH0AAAAFZAAgAAAAAMKoHwhZcocaQy7asIuRG8+P1qPENgFAwzc3X1gZWYnJBXMAIAAAAAB+R01s+WdJjLa5p7STuEylradWr+2JDxsWx9bKDgXNDQVsACAAAAAADeXTBHsm+FH2pQVoqOBPPIJiTJLqrzGisNnQ3S3xYJAAAzQ4AH0AAAAFZAAgAAAAAF41XuyBvREKcxjDl+wbnillseykpAjCKHmwIu+RNvM7BXMAIAAAAAC2Wzq+2mfO7howoOZxquqvOuH1D2WdlzA1nK+LUp0FMgVsACAAAAAARha+D6DVeDxSjNyXXO5DMY+W70EGyfc7gxR4TjzcYusAAzQ5AH0AAAAFZAAgAAAAAAfONgdhLPEjvsMxTY9K4//7WjREuRmZ6Bpcf3yvdMf3BXMAIAAAAABCy/zjmzucxQkbJ96l5vS5x6SeyHE0Z+Aqp9oZgBcC6QVsACAAAAAAasG/uN4DnWHZLkLhH4cMzXk5F/HL2D+72WH+1jjgH8UAAzUwAH0AAAAFZAAgAAAAAA5ZsebFm5NrSGs2E17+fUt4qkzsVmy4IJA5nGehtSBVBXMAIAAAAAAOzteKfp+YGPqn1fi8u/lKXP7E2Zgouwgt6KAADHX9AQVsACAAAAAA2+FaAbl8JZogfNCI0FFbmZZPy/KLF1u16FGrPspSbEIAAzUxAH0AAAAFZAAgAAAAAHf6LIjrvy6I31w/8b910U9qU8cBIYiWn9mW55NYZF8VBXMAIAAAAACONPisRtnFG9vV2mTQ3hRR/hGuVRA9dGd9Lt9JqDoM8wVsACAAAAAA+h7V/jIYJcd0ALIvFBlwxkFqWxBVlkqT9wFkmumr4QcAAzUyAH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": 
"RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Defaults.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Defaults.yml new file mode 100644 index 0000000000..8f91713da5 --- /dev/null +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Defaults.yml @@ -0,0 +1,157 @@ +# Test "range" field with defaults for `trimFactor` and `sparsity`. +# Test requires libmongocrypt with changes in 14ccd9ce (MONGOCRYPT-698). +runOn: + - minServerVersion: "8.0.0" # Requires 8.0.0-rc13. + topology: [ "replicaset", "sharded", "load-balanced" ] # Exclude "standalone". QE collections are not supported on standalone. 
+database_name: &database_name "default" +collection_name: &collection_name "default" +data: [] +encrypted_fields: &encrypted_fields { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + # Exclude `trimFactor` and `sparsity` + "contention": { "$numberLong": "0" }, + "min": { "$numberInt": "0" }, + "max": { "$numberInt": "200" } + } + } + ] +} +key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] +tests: + - description: "FLE2 Range applies defaults for trimFactor and sparsity" + clientOptions: + autoEncryptOpts: + kmsProviders: + local: {'key': {'$binary': {'base64': 'Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk', 'subType': '00'}}} + operations: + - name: insertOne + arguments: + document: &doc0 { _id: 0, encryptedInt: { $numberInt: "0" } } + - name: insertOne + arguments: + document: &doc1 { _id: 1, encryptedInt: { $numberInt: "1" } } + - name: find + arguments: + filter: { encryptedInt: { $gt: { $numberInt: "0" } } } + result: [*doc1] + expectations: + - command_started_event: + command: + listCollections: 1 + filter: + name: *collection_name + command_name: listCollections + - command_started_event: + command: + find: datakeys + filter: { + "$or": [ + { + "_id": { + "$in": [ + {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 
'subType': '04'}} + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + } + $db: keyvault + readConcern: { level: "majority" } + command_name: find + - command_started_event: + command: + insert: *collection_name + documents: + - &doc0_encrypted { "_id": 0, "encryptedInt": { $$type: "binData" } } + ordered: true + encryptionInformation: &encryptionInformation + type: 1 + schema: + default.default: + # libmongocrypt applies escCollection and ecocCollection to outgoing command. + escCollection: "enxcol_.default.esc" + ecocCollection: "enxcol_.default.ecoc" + <<: *encrypted_fields + command_name: insert + - command_started_event: + command: + insert: *collection_name + documents: + - &doc1_encrypted { "_id": 1, "encryptedInt": { $$type: "binData" } } + ordered: true + encryptionInformation: *encryptionInformation + command_name: insert + - command_started_event: + command: + find: *collection_name + filter: + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DfQaAAADcGF5bG9hZADEGgAABGcAsBoAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAA30oqY6NKy1KWDWf6Z36DtA2QsL9JRALvHX6smxz8cb4FcwAgAAAAADIhM0hCHwFGH+k7kPGuZlO+v5TjV6RRwA5FqUKM60o0BWwAIAAAAABTMPNUweBKrILSCxc5gcgjn9pTkkKX7KqWXgNMk4q7XgADMgB9AAAABWQAIAAAAACnCDvYEbgR9fWeQ8SatKNX43p0XIXTyFfzc7/395V2swVzACAAAAAAp8pkn2wJrZRBLlD18oE1ZRRiujmtFtuHYTZDzdGNE4kFbAAgAAAAAE2eptD2Jp126h5cd7S6k8IjRB6QJhuuWzPU/SEynDXTAAMzAH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzQAfQAAAAVkACAAAAAA8Ci9z02yMVsDNyHvLStLAHR25LO22UO5P/gbUG/IStQFcwAgAAAAAOdfFhaFVq1JPr3dIeLm1EYKWgceZ7hZ5FJT5u/lL/I+BWwAIAAAAADqUyU1hSFDLCmqsz2dhPhefzCShUV/Z2x+4P9xcGw8rwADNQB9AAAABWQAIAAAAAD3g2atCWYVOXW0YbCbvIturqNIAsy210bkL9KmqVMlAAVzACAAAAAAVGEb7L0QCjV/PBTAvUyhlddo467ToKjlMdwI9hsjuE4FbAAgAAAAAJe
0bDhUH1sZldnDGWn0xMa1CQuN6cgv/i/6XqnpPS39AAM2AH0AAAAFZAAgAAAAANQOKUE9FOmCoMva2IYg45LZXJX0cMpUR1OvIwFmjLDYBXMAIAAAAAB6dyIKkQ86l/8j8zeWcDYeVGRYKd0USz6To3LbOBAKsAVsACAAAAAAELK0ExI0g4/WxNs+mf+Ua+mie3MuMO3daPGukA23VUYAAzcAfQAAAAVkACAAAAAARQp+fGA08v1bhcnYbfsP0ubXl9yg18QmYMfh2sd8EdEFcwAgAAAAABhe79wEznE298tt02xyRF7bk7a2NH9kwVg1TPY5/lT1BWwAIAAAAAADiGV5f/RRPkwpSrZMGHNBSarmwyqV+SYXI73QW/PmnwADOAB9AAAABWQAIAAAAABnW3CpmSFTglPNKYHJHhJHC/vd5BMWQpztIXQBL0sCngVzACAAAAAAC21qRBu2Px7VUz1lW95Dfn/0tw2yq9AVBtka34HijLgFbAAgAAAAAP8S1s5OA5cJT6ILpA94LanuLsSl9BsRCWHBtufFTMVrAAM5AH0AAAAFZAAgAAAAAJRIWu6DI2LR+2Pi09OaBZEmS2FInyBnGs9wf9Jf2wiIBXMAIAAAAABoDqKzj11qyOfXl4dcfkmGHqZxXyAsnGlgA9wsJRWWUQVsACAAAAAAIsDousyo/D8e4BCwUqvFhrKtOnpcGCSqpN94oFtWaC0AAzEwAH0AAAAFZAAgAAAAAE0h7vfdciFBeqIk1N14ZXw/jzFT0bLfXcNyiPRsg4W4BXMAIAAAAAB0Kbvm3VLBphtd8/OpgNuJtJaJJLhHBCKZJJeK+GcthAVsACAAAAAAKfjHp8xww1JDjzyjTnfamOvjFDc1Z3Hp/v/ZuQnFOOEAAzExAH0AAAAFZAAgAAAAACL9+rQRyywIXa5Pr7g2SnB0s0EjIct7PQtzjEkA69acBXMAIAAAAADz54imCCbu/qQkYP9wW2f5pHoBS+EyCe+xuDwC0UTiYgVsACAAAAAAKv602j4c3Bpn2t10qGl68eAD/fQsIH5lKMj8ANwrf7oAAzEyAH0AAAAFZAAgAAAAAKTK0NLhQ/+Y/HMxjRwBlXpXJAhAmCoWf1fReTegPnVpBXMAIAAAAAD7AlW+P4FfQS4r8d7EEvPVEP1diSbrVDBqg8ZvNl1XRAVsACAAAAAATTSEkff+/JMBjNwUciY2RQ6M66uMQMAtwU+UidDv1y4AAzEzAH0AAAAFZAAgAAAAAGMbgPxi2Wu1AlqoDKTgyBnCZlnCjHm2naxRcizkIbYJBXMAIAAAAADMvSM3VZzVyRFCfUvcLXAXQFRIxlhm0t0dUsnaRZG4hgVsACAAAAAAI7uGriMAQc4A/a70Yi1Y7IAC7o/mfNYf7/FvwELYf80AAzE0AH0AAAAFZAAgAAAAAPnZ1bdmrcX0fsSxliuSqvDbRqwIiVg0tYp0PViRX0nOBXMAIAAAAAAqBdZGg9O74mnwyQF+lILtyzHdLOErDjPSf9sM8EqCugVsACAAAAAAwhuDsz+fCtqY8mW8QvEVQERjDChwrYTw4y7dinlCCOMAAzE1AH0AAAAFZAAgAAAAAJ40Dmb5BUT1AlWjfXB43nIbJgDn9rBg9FAeYR80WK0vBXMAIAAAAAAMPqLMDdNmnKzA3Hq49/NkJfs+/cjnyjSAbmiOFUE5FgVsACAAAAAAxbi7ql49Y4pduqWlLJqpwimRzrEnC7w5fWaMBiinHL8AAzE2AH0AAAAFZAAgAAAAAGelnhqWM2gUVy4P5QE/2Zfd7s9BugPqB/tcnSsFg5X0BXMAIAAAAAAWUhif3G+NMvZ3YPLB5OMuIhfPEu6U8KR9gTvJFz5uIwVsACAAAAAADEs8/aVSj2sJjxjv1K7o/aH8vZzt1bga73YiIKUx5DYAAzE3AH0AAAAFZAAgAAAAAD1xX2wCyf1aK1MoXnBAPfWLeBxsJI2i06tWbuiYKgElBXMAIAA
AAACW1NW4RibvY0JRUzPvCmKnVbEy8AIS70fmsY08WgJOEgVsACAAAAAAQq9eIVoLcd4WxXUC3vub+EnxmcI2uP/yUWr3cz0jv9EAAzE4AH0AAAAFZAAgAAAAAHwU1LYeJmTch640sTu3VRRRdQg4YZ7S9IRfVXWHEWU8BXMAIAAAAACozWKD2YlqbQiBVVwJKptfAVM+R2FPJPtXkxVFAhHNXQVsACAAAAAAn7LS0QzTv9sOJzxH0ZqxsLYBYoArEo/PIXkU/zTnpM0AAzE5AH0AAAAFZAAgAAAAAHKaToAsILpmJyCE02I1iwmF/FibqaOb4b5nteuwOayfBXMAIAAAAABPxYjSK5DKgsdUZrZ+hM6ikejPCUK6Rqa0leoN7KOM0QVsACAAAAAAH9rPq5vvOIe9nTAcM1W1dVhQZ+gSkBohgoWLPcZnQXcAAzIwAH0AAAAFZAAgAAAAANTGiHqJVq28n7mMZsJD6gHxVQp1A6z8wgZVW+xV/lhmBXMAIAAAAABCR4BfdNVy7WE+IyQ312vYuIW0aGcXxr2II/MbNz8ZdAVsACAAAAAAng0GYpYJTypRLQUd5tIXWaAjZX5na04T/BypmwwrXPoAAzIxAH0AAAAFZAAgAAAAABooumzjEqp9Hvvd+sn1L82NI2iUGRl0nXQNJTHM7oyVBXMAIAAAAADgjz5L2ursK4C+pXXsJ6XHABhyallj9s/vSUgxXvjiiwVsACAAAAAAPjlAM0tbO6EUmLAeIZt57YMkMsuQfuC3T3d9vtnxgjwAAzIyAH0AAAAFZAAgAAAAAMA4jmE8U2uGkYUeKoYSlb22tfrRq2VlhV1Jq1kn4hV9BXMAIAAAAADG4fLeJUcINPSb1pMfAASJkuYsgS/59Eq/51mET/Y7RQVsACAAAAAAmwwcWOnzvpxm4pROXOL+BlxjEG/7v7hIautb2ubFT44AAzIzAH0AAAAFZAAgAAAAAK8/E3VHzHM6Kjp39GjFy+ci1IiUG5oxh0W6elV+oiX2BXMAIAAAAAA4/F4Q94xxb2TvZcMcji/DVTFrZlH8BL/HzD86RRmqNAVsACAAAAAAif3HPf6B1dTX/W+Vlp6ohadEQk/GAmHYzXfJia2zHeIAAzI0AH0AAAAFZAAgAAAAAGUX9ttLN1cCrOjlzsl/E6jEzQottNDw8Zo94nbO1133BXMAIAAAAAA7uVthFvXH+pbBrgQmnkPcpiHFEVCAi0WA7sAt9tlt3gVsACAAAAAAznaMStSbtGXU1Pb5z9KDTvEd79s6gmWYCKOKdzeijpEAAzI1AH0AAAAFZAAgAAAAAKnT/qg8N85Q9EQvpH7FBqUooxHFgrIjqLlIDheva2QSBXMAIAAAAABGAKkFMKoSIrvClWF7filoYM6fI9xSqOJVNS3dv4lxYwVsACAAAAAAgITE31hQA4ZOxpUFYSYv0mzWbd/6RKgbUXiUY96fBQEAAzI2AH0AAAAFZAAgAAAAAHRDRDT2hJrJ8X9zB9ELT28q8ZsfkYr92chaZYakiLlqBXMAIAAAAAAT0Le67ObldDta/Qb17dYfdslPsJTfGj3bWAgC0JIingVsACAAAAAAMGDrqys8iJ3fCT2Cj+zXIuXtsf4OAXWJl5HoPUMlbNoAAzI3AH0AAAAFZAAgAAAAAOOJcUjYOE0KqcYS1yZ363zglQXfr3XSD+R5fWLSivDoBXMAIAAAAABjeLe+tg37lNa+DdVxtlCtY77tV9PqfJ5X4XEKrfwu0AVsACAAAAAAlbpHiQAPLLTvSF+u58RBCLnYQKB5wciIQmANV9bkzsoAAzI4AH0AAAAFZAAgAAAAAMwWOOaWDDYUusdA1nyoaEB3C4/9GRpFNGags95Ddp4LBXMAIAAAAACLrsQXGWK15fW4mPEUXJ/90by13aG+727qWJep8QJ/WgVsACAAAAAAuThwsAsKUB56QAXC0MjJsZ9736atbiHPlK2tE0urf9QAAzI5AH0
AAAAFZAAgAAAAABPRXBK0z8UANcvMDWntBjN9yF7iGMPLbhbaKrvHwcplBXMAIAAAAACZlqWsYPIb+ydmH03BxD3TqSGsSNoI7EVCy0VgW0TpYgVsACAAAAAAD2uaBv8oc7l4EeC5PWx5sfeyGZoas0JdFJ33M3jjgjMAAzMwAH0AAAAFZAAgAAAAAOn9/6pbzjIxFEApugaVOvVKXq23sDCJELv5UtLPDZI3BXMAIAAAAACHIwSDTlof0vFoigF4drbeM/8rdlj/4U386zQsNLtPGwVsACAAAAAAsYt/rXnpL55J9rlWSFRA4seaU6ggix7RgxbrJPu6gO4AAzMxAH0AAAAFZAAgAAAAAIMCESykv5b5d6mYjU5DlnO709lOFCaNoJBLtzBIqmg4BXMAIAAAAADs1Bfuaun4Es3nQ4kr29BzheLRDcFv+9a0gOGkSEcrDgVsACAAAAAA5kW6i/jOBSdoGAsZEZxVNRvt6miv86bP8JfUT+1KJg8AAzMyAH0AAAAFZAAgAAAAAFSPmr27XgKhUkbEvvC6Br5K1w7280NZrrhdzfYF+YGjBXMAIAAAAADv2h+Xq6kM7MHYTLMACRwbe2MzGHu4sdB67FGzDR6H4QVsACAAAAAAKII0MMC7o6GKVfGo2qBW/p35NupBp7MI6Gp0zXYwJOcAAzMzAH0AAAAFZAAgAAAAAPSV9qprvlNZK6OSQZNxKhJmBMs6QCKFESB/oeIvAS0iBXMAIAAAAAA835Jh22/pvZgKoYH6KjE+RRpYkaM1G35TWq6uplk/rgVsACAAAAAA162IdSb079yVlS7GkuSdHU3dOw03a+NS55ZPVBxbD08AAzM0AH0AAAAFZAAgAAAAAGsadEBJFax/UltPXB86G/YPxo6h353ZT+rC62iGy7qqBXMAIAAAAADs9TP3h91f6bTuG8QCQMA3atAVGs8k0ZjVzX3pM8HNAgVsACAAAAAA2ed4R4wYD6DT0P+N6o3gDJPE0DjljbRAv5vme3jb42sAAzM1AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzM2AH0AAAAFZAAgAAAAAKJY+8+7psFzJb5T+Mg9UWb6gA9Y8NN9j/ML2jZkNDNPBXMAIAAAAAA2R/nCtSYfCim89BzdUPS+DTQGwYDk+2ihFPEBS8h+ygVsACAAAAAAaEQra7xyvA3JS0BasIpRVrz7ZXsp6RpH7OpfJBFzFG8AAzM3AH0AAAAFZAAgAAAAAI4qr+sJiRaqwZRhnenAzD7tTKq+jP1aaLyAln3w1HQuBXMAIAAAAADNYpqV73NpwN+Ta0ms1SRiu+6WNOOdGT+syghL+JAFhQVsACAAAAAAN07Fo9SK+fXp5Odk1J806pyVWc2WHXCtb1gJQknTgqsAAzM4AH0AAAAFZAAgAAAAAISgN1Hid7IWvDESN/3tywFZiBsZPYapOUx9/QjDDxLfBXMAIAAAAAA7lxpEz3+CGdv6/WKIAlIwRYURREKgn7+StwNoVekkDwVsACAAAAAAx+Oa2v1e1R7VomfsvcKO8VkY4eTl7LzjNQQL6Cj6GBQAAzM5AH0AAAAFZAAgAAAAAOTLdk1RIUzCsvK7xCXy+LxGhJf87fEL406U9QKta3JRBXMAIAAAAAD8+6UnUn8sN6AgQuuf7uFxW+2ZJNpZLgp3eKVtjbo9ewVsACAAAAAAQN3mZHmaDM0ZbUnk2O/+wCUjiCs4bnshfHjd/4ygLXcAAzQwAH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8
UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzQxAH0AAAAFZAAgAAAAAPLX4XT1eMfokMvj73G6loHEotbdivVFM6cpMbU0zIOmBXMAIAAAAABuTqwm6E60kVBN5iClzLnMBozIQRYjMozzRNKVhixkEAVsACAAAAAAjvY9G0Of8EQcZ4GVfSEVz7jrNn7i4qps2r82jJmngKoAAzQyAH0AAAAFZAAgAAAAAGzGJAUZBcVKRb4bCSNaRxtcDH2TqIgHqMElD9RL7SzDBXMAIAAAAABbJfrLwBrqZ2Ylm9QfL7nkW+GJ8vTlaeMUDT5620ebaAVsACAAAAAASiaS1IlBls5Tan57XqqbR1cuvyOcoSibJJQGREzm4c0AAzQzAH0AAAAFZAAgAAAAAC028abAppwE/ApZHU5RbzZZ8OPD5eJ8/6+NgiSFf4d+BXMAIAAAAAD3THvDUYWULR+AVLuRRPPAMVMeZ2ldWpBYSODboszWbQVsACAAAAAAATOaeYj+kx3MTDeNUcKGbUxLZDeMjC8JrWnlHmWTamQAAzQ0AH0AAAAFZAAgAAAAAHWr8wQYIKLiKeb3wd8kZQuXD/GUHDqXj12K/EQWV11CBXMAIAAAAADo3aFHDuyfls9tcWCxlFqJn4zDXd3WT9CIFYFjJnTYswVsACAAAAAAeMbIatR7DgefzuvF4WyNVDjJxP8KPA6U/rmMQIBvpM0AAzQ1AH0AAAAFZAAgAAAAAMdRi6AAjF1Z9ucMqYl2Ud1PLUGOlOPJFgSrPTjs27u8BXMAIAAAAAAqOdI7+P8srvqCTFadwMM3iggaVOGcf1BB0EjBYeV6RAVsACAAAAAAU+V2GrqgxJYs9mxuak/8JMFICXwQ2vksrBdOvSwWFpoAAzQ2AH0AAAAFZAAgAAAAADKKe++fqh4sn0a8Bb+w3QMFnOqSE5hDI3zGQTcmJGcOBXMAIAAAAAC8ebHa++JmxVISv6LzjuMgEZqzKSZlJyujnSV9syRD9AVsACAAAAAAQcVNSjyetScLu78IrAYaAigerY4kWtnbctmIyb19Wa4AAzQ3AH0AAAAFZAAgAAAAAMKoHwhZcocaQy7asIuRG8+P1qPENgFAwzc3X1gZWYnJBXMAIAAAAAB+R01s+WdJjLa5p7STuEylradWr+2JDxsWx9bKDgXNDQVsACAAAAAADeXTBHsm+FH2pQVoqOBPPIJiTJLqrzGisNnQ3S3xYJAAAzQ4AH0AAAAFZAAgAAAAAF41XuyBvREKcxjDl+wbnillseykpAjCKHmwIu+RNvM7BXMAIAAAAAC2Wzq+2mfO7howoOZxquqvOuH1D2WdlzA1nK+LUp0FMgVsACAAAAAARha+D6DVeDxSjNyXXO5DMY+W70EGyfc7gxR4TjzcYusAAzQ5AH0AAAAFZAAgAAAAAAfONgdhLPEjvsMxTY9K4//7WjREuRmZ6Bpcf3yvdMf3BXMAIAAAAABCy/zjmzucxQkbJ96l5vS5x6SeyHE0Z+Aqp9oZgBcC6QVsACAAAAAAasG/uN4DnWHZLkLhH4cMzXk5F/HL2D+72WH+1jjgH8UAAzUwAH0AAAAFZAAgAAAAAA5ZsebFm5NrSGs2E17+fUt4qkzsVmy4IJA5nGehtSBVBXMAIAAAAAAOzteKfp+YGPqn1fi8u/lKXP7E2Zgouwgt6KAADHX9AQVsACAAAAAA2+FaAbl8JZogfNCI0FFbmZZPy/KLF1u16FGrPspSbEIAAzUxAH0AAAAFZAAgAAAAAHf6LIjrvy6I31w/8b910U9qU8cBIYiWn9mW55NYZF8VBXMAIAAAAACONPisRtnFG9vV2mTQ3hRR/hGuVRA9dGd9Lt9JqDoM8wVsACAAAAAA+h7V/jIYJcd0ALIvFBlwxkFqWxBVlkqT9wFkmumr4QcAAzUyAH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bn
ErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + encryptionInformation: *encryptionInformation + command_name: find + outcome: + collection: + data: + - + { + "_id": 0, + "encryptedInt": { $$type: "binData" }, + # Expected contents of `__safeContent__` require MONGOCRYPT-698 to apply expected `trimFactor`. + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + } + ] + } + - + { + "_id": { + "$numberInt": "1" + }, + "encryptedInt": { $$type: "binData" }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + } + ] + } \ No newline at end of file diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Aggregate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Aggregate.json similarity index 98% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Aggregate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Aggregate.json index 79f26660f2..e14ca8ff0c 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Aggregate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Aggregate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": 
"rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -208,10 +210,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -255,10 +260,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -308,10 +316,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -335,12 +346,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -733,12 +738,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Aggregate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Aggregate.yml similarity index 98% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Aggregate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Aggregate.yml index 83ca7fb90f..63e06a886f 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Aggregate.yml +++ 
b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Aggregate.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoubleNoPrecision', 'bsonType': 'double', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoubleNoPrecision', 'bsonType': 'double', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Double. Aggregate." 
@@ -124,12 +122,6 @@ tests: "_id": 0, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -521,12 +513,6 @@ tests: "_id": 1, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Correctness.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Correctness.json similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Correctness.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Correctness.json index 117e56af62..edb336743c 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Correctness.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Correctness.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Correctness.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Correctness.yml similarity index 97% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Correctness.yml rename to 
source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Correctness.yml index 5f91aead18..54a116e5ce 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Correctness.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Correctness.yml @@ -3,16 +3,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoubleNoPrecision', 'bsonType': 'double', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoubleNoPrecision', 'bsonType': 'double', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 
'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "Find with $gt" diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Delete.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Delete.json similarity index 98% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Delete.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Delete.json index 40d8ed5bb2..6821c97939 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Delete.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Delete.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -199,10 +201,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -246,10 +251,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -300,10 +308,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -327,12 +338,6 @@ "$$type": "binData" }, 
"__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Delete.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Delete.yml similarity index 98% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Delete.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Delete.yml index def2bcb67b..f926c7b56c 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Delete.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Delete.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoubleNoPrecision', 'bsonType': 'double', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoubleNoPrecision', 'bsonType': 'double', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Double. Delete." 
@@ -126,12 +124,6 @@ tests: "_id": 0, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-FindOneAndUpdate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json similarity index 98% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Double-FindOneAndUpdate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json index f0893ce661..298a4506cc 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-FindOneAndUpdate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -210,10 +212,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -257,10 +262,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -312,10 +320,13 @@ "path": "encryptedDoubleNoPrecision", 
"bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -339,12 +350,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -737,12 +742,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-FindOneAndUpdate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.yml similarity index 98% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Double-FindOneAndUpdate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.yml index 4bac3c1382..f8cfe40ab8 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-FindOneAndUpdate.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoubleNoPrecision', 'bsonType': 'double', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoubleNoPrecision', 'bsonType': 'double', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Double. FindOneAndUpdate." 
@@ -122,12 +120,6 @@ tests: "_id": 0, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -519,12 +511,6 @@ tests: "_id": 1, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-InsertFind.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-InsertFind.json similarity index 98% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Double-InsertFind.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-InsertFind.json index d3dc2f830c..0c6f9e9872 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-InsertFind.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-InsertFind.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -204,10 +206,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -251,10 +256,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": 
"double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -299,10 +307,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -326,12 +337,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -724,12 +729,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-InsertFind.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-InsertFind.yml similarity index 98% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Double-InsertFind.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-InsertFind.yml index 33b531f839..cea49c72d1 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-InsertFind.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-InsertFind.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoubleNoPrecision', 'bsonType': 'double', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoubleNoPrecision', 'bsonType': 'double', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Double. Insert and Find." 
@@ -118,12 +116,6 @@ tests: "_id": 0, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -515,12 +507,6 @@ tests: "_id": 1, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Update.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Update.json similarity index 98% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Update.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Update.json index 9d6a1fbfdd..dabe8a0930 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Update.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Update.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -208,10 +210,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -255,10 +260,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - 
"queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -316,10 +324,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -343,12 +354,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -741,12 +746,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Update.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Update.yml similarity index 98% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Update.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Update.yml index 65f50aecd4..4c550854df 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Double-Update.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Double-Update.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoubleNoPrecision', 'bsonType': 'double', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoubleNoPrecision', 'bsonType': 'double', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Double. Update." 
@@ -135,12 +133,6 @@ tests: "_id": 0, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -532,12 +524,6 @@ tests: "_id": 1, "encryptedDoubleNoPrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Aggregate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json similarity index 96% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Aggregate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json index 4188685a2c..8d434dc279 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Aggregate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -217,10 +219,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -273,10 +278,13 @@ "path": 
"encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -335,10 +343,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -371,12 +382,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -475,12 +480,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Aggregate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.yml similarity index 94% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Aggregate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.yml index 4c3b3d66bc..bd603467b6 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Aggregate.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoublePrecision', 'bsonType': 'double', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDouble': '0.0'}, 'max': {'$numberDouble': '200.0'}, 'precision': {'$numberInt': '2'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoublePrecision', 'bsonType': 'double', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDouble': '0.0'}, 'max': {'$numberDouble': '200.0'}, 'precision': {'$numberInt': '2'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range DoublePrecision. Aggregate." 
@@ -124,12 +122,6 @@ tests: "_id": 0, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -227,12 +219,6 @@ tests: "_id": 1, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Correctness.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.json similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Correctness.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.json index 60f1ea7a33..87d0e3dd8c 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Correctness.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Correctness.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.yml similarity index 97% rename from 
source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Correctness.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.yml index 6f3259f324..9c7a8d22f2 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Correctness.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.yml @@ -3,16 +3,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoublePrecision', 'bsonType': 'double', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDouble': '0.0'}, 'max': {'$numberDouble': '200.0'}, 'precision': {'$numberInt': '2'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoublePrecision', 'bsonType': 'double', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDouble': '0.0'}, 'max': {'$numberDouble': '200.0'}, 'precision': {'$numberInt': '2'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 
'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "Find with $gt" diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Delete.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json similarity index 96% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Delete.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json index 4ed591d3f8..a9315dec96 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Delete.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -208,10 +210,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -264,10 +269,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { 
"$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -327,10 +335,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -363,12 +374,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Delete.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Delete.yml similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Delete.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Delete.yml index fa04447534..817d785c1e 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Delete.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Delete.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoublePrecision', 'bsonType': 'double', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDouble': '0.0'}, 'max': {'$numberDouble': '200.0'}, 'precision': {'$numberInt': '2'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoublePrecision', 'bsonType': 'double', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDouble': '0.0'}, 'max': {'$numberDouble': '200.0'}, 'precision': {'$numberInt': '2'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range DoublePrecision. Delete." 
@@ -126,12 +124,6 @@ tests: "_id": 0, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json similarity index 96% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json index d8fbbfae73..28bebe0dbb 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -219,10 +221,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -275,10 +280,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -339,10 
+347,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -375,12 +386,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -479,12 +484,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.yml similarity index 94% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.yml index 0f615d4b5e..c1aa8333b4 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoublePrecision', 'bsonType': 'double', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDouble': '0.0'}, 'max': {'$numberDouble': '200.0'}, 'precision': {'$numberInt': '2'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoublePrecision', 'bsonType': 'double', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDouble': '0.0'}, 'max': {'$numberDouble': '200.0'}, 'precision': {'$numberInt': '2'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range DoublePrecision. FindOneAndUpdate." 
@@ -122,12 +120,6 @@ tests: "_id": 0, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -225,12 +217,6 @@ tests: "_id": 1, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-InsertFind.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json similarity index 96% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-InsertFind.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json index 4213b066d1..3b3176be6f 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-InsertFind.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -213,10 +215,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -269,10 +274,13 @@ "path": 
"encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -326,10 +334,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -362,12 +373,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -466,12 +471,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-InsertFind.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.yml similarity index 94% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-InsertFind.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.yml index 107151449d..17295c2e66 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-InsertFind.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoublePrecision', 'bsonType': 'double', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDouble': '0.0'}, 'max': {'$numberDouble': '200.0'}, 'precision': {'$numberInt': '2'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoublePrecision', 'bsonType': 'double', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDouble': '0.0'}, 'max': {'$numberDouble': '200.0'}, 'precision': {'$numberInt': '2'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range DoublePrecision. Insert and Find." 
@@ -118,12 +116,6 @@ tests: "_id": 0, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -221,12 +213,6 @@ tests: "_id": 1, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Update.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Update.json similarity index 96% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Update.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Update.json index 89eb4c338d..be2d0e9f4a 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Update.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Update.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -217,10 +219,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -273,10 +278,13 @@ "path": 
"encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -343,10 +351,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -379,12 +390,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -483,12 +488,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Update.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Update.yml similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Update.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Update.yml index b8ffbe9d4d..c0c794357f 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-DoublePrecision-Update.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-DoublePrecision-Update.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoublePrecision', 'bsonType': 'double', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDouble': '0.0'}, 'max': {'$numberDouble': '200.0'}, 'precision': {'$numberInt': '2'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedDoublePrecision', 'bsonType': 'double', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberDouble': '0.0'}, 'max': {'$numberDouble': '200.0'}, 'precision': {'$numberInt': '2'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range DoublePrecision. Update." 
@@ -137,12 +135,6 @@ tests: "_id": 0, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -240,12 +232,6 @@ tests: "_id": 1, "encryptedDoublePrecision": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Aggregate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Aggregate.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Aggregate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Aggregate.json index 686f0241ba..c689dede18 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Aggregate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Aggregate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -214,10 +216,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -267,10 +272,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", 
"contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -326,10 +334,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -359,12 +370,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -421,12 +426,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Aggregate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Aggregate.yml similarity index 93% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Aggregate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Aggregate.yml index 052a3006e4..f59c104d44 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Aggregate.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Aggregate.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Int. Aggregate." 
@@ -124,12 +122,6 @@ tests: "_id": 0, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -185,12 +177,6 @@ tests: "_id": 1, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Correctness.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Correctness.json similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Correctness.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Correctness.json index 2964624f22..9dc4e4e501 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Correctness.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Correctness.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Correctness.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Correctness.yml similarity index 98% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Correctness.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Correctness.yml index f7e4c53dec..9cb1cb368f 
100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Correctness.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Correctness.yml @@ -3,16 +3,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 
'masterKey': {'provider': 'local'}} ] tests: - description: "Find with $gt" diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Delete.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Delete.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Delete.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Delete.json index 531b3e7590..4a6b34a1dc 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Delete.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Delete.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -205,10 +207,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -258,10 +263,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -318,10 +326,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -351,12 +362,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": 
"00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Delete.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Delete.yml similarity index 94% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Delete.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Delete.yml index ecc5eaa279..d2ef688a83 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Delete.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Delete.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Int. Delete." 
@@ -126,12 +124,6 @@ tests: "_id": 0, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-FindOneAndUpdate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Int-FindOneAndUpdate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json index 402086cdb6..2bf905fa65 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-FindOneAndUpdate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -216,10 +218,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -269,10 +274,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -330,10 +338,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": 
{ "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -363,12 +374,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -425,12 +430,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-FindOneAndUpdate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.yml similarity index 93% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Int-FindOneAndUpdate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.yml index 9e878890fb..a27de9b7e5 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-FindOneAndUpdate.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Int. FindOneAndUpdate." 
@@ -122,12 +120,6 @@ tests: "_id": 0, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -183,12 +175,6 @@ tests: "_id": 1, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-InsertFind.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-InsertFind.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Int-InsertFind.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-InsertFind.json index 965b8a5516..a5eb4d60ec 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-InsertFind.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-InsertFind.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -210,10 +212,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -263,10 +268,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { 
"$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -317,10 +325,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -350,12 +361,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -412,12 +417,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-InsertFind.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-InsertFind.yml similarity index 93% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Int-InsertFind.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-InsertFind.yml index 6e9594a1b6..fdb580cee9 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-InsertFind.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-InsertFind.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Int. Insert and Find." 
@@ -118,12 +116,6 @@ tests: "_id": 0, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -179,12 +171,6 @@ tests: "_id": 1, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Update.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Update.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Update.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Update.json index 6cf44ac782..e826ea2acf 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Update.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Update.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -214,10 +216,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -267,10 +272,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + 
"trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -334,10 +342,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -367,12 +378,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -429,12 +434,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Update.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Update.yml similarity index 93% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Update.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Update.yml index a98c1a659f..7a383aae4a 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Int-Update.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Int-Update.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Int. Update." 
@@ -137,12 +135,6 @@ tests: "_id": 0, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -198,12 +190,6 @@ tests: "_id": 1, "encryptedInt": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Aggregate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Aggregate.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Aggregate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Aggregate.json index 6edb38a800..d5020f5927 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Aggregate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Aggregate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -214,10 +216,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -267,10 +272,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { 
"$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -326,10 +334,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -359,12 +370,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -421,12 +426,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Aggregate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Aggregate.yml similarity index 93% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Aggregate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Aggregate.yml index 5bc598daa5..8eb8f3615d 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Aggregate.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Aggregate.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedLong', 'bsonType': 'long', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberLong': '0'}, 'max': {'$numberLong': '200'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedLong', 'bsonType': 'long', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberLong': '0'}, 'max': {'$numberLong': '200'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Long. Aggregate." 
@@ -124,12 +122,6 @@ tests: "_id": 0, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -185,12 +177,6 @@ tests: "_id": 1, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Correctness.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Correctness.json similarity index 99% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Correctness.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Correctness.json index 3d33f7381b..d81e0933f8 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Correctness.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Correctness.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Correctness.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Correctness.yml similarity index 98% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Correctness.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Correctness.yml index 
01834f1c3b..97b7db2b71 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Correctness.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Correctness.yml @@ -3,16 +3,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedLong', 'bsonType': 'long', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberLong': '0'}, 'max': {'$numberLong': '200'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedLong', 'bsonType': 'long', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberLong': '0'}, 'max': {'$numberLong': '200'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 
'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "Find with $gt" diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Delete.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Delete.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Delete.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Delete.json index 1b32782010..3720d00341 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Delete.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Delete.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -205,10 +207,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -258,10 +263,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -318,10 +326,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -351,12 +362,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": 
"5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Delete.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Delete.yml similarity index 94% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Delete.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Delete.yml index 617794a174..4f18efa1df 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Delete.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Delete.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedLong', 'bsonType': 'long', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberLong': '0'}, 'max': {'$numberLong': '200'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedLong', 'bsonType': 'long', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberLong': '0'}, 'max': {'$numberLong': '200'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Long. Delete." 
@@ -126,12 +124,6 @@ tests: "_id": 0, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-FindOneAndUpdate.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Long-FindOneAndUpdate.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json index b8e3b888a8..5e4b5ae0de 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-FindOneAndUpdate.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -216,10 +218,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -269,10 +274,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -330,10 +338,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": 
"range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -363,12 +374,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -425,12 +430,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-FindOneAndUpdate.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.yml similarity index 93% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Long-FindOneAndUpdate.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.yml index 1459ca106c..4e5a32994d 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-FindOneAndUpdate.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedLong', 'bsonType': 'long', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberLong': '0'}, 'max': {'$numberLong': '200'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedLong', 'bsonType': 'long', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberLong': '0'}, 'max': {'$numberLong': '200'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Long. FindOneAndUpdate." 
@@ -122,12 +120,6 @@ tests: "_id": 0, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -183,12 +175,6 @@ tests: "_id": 1, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-InsertFind.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-InsertFind.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Long-InsertFind.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-InsertFind.json index d637fcf9e7..0d48580626 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-InsertFind.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-InsertFind.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -210,10 +212,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -263,10 +268,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", 
"contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -317,10 +325,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -350,12 +361,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -412,12 +417,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-InsertFind.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-InsertFind.yml similarity index 93% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Long-InsertFind.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-InsertFind.yml index 578c08c247..c30106402a 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-InsertFind.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-InsertFind.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedLong', 'bsonType': 'long', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberLong': '0'}, 'max': {'$numberLong': '200'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedLong', 'bsonType': 'long', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberLong': '0'}, 'max': {'$numberLong': '200'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Long. Insert and Find." 
@@ -118,12 +116,6 @@ tests: "_id": 0, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -179,12 +171,6 @@ tests: "_id": 1, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Update.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Update.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Update.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Update.json index 1b76019a4c..2d3321fd80 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Update.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Update.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -214,10 +216,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -267,10 +272,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": 
"0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -334,10 +342,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -367,12 +378,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -429,12 +434,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Update.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Update.yml similarity index 93% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Update.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Update.yml index db16c3dd64..81879398ea 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-Long-Update.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-Long-Update.yml @@ -1,16 +1,14 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
- maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedLong', 'bsonType': 'long', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberLong': '0'}, 'max': {'$numberLong': '200'}}}]} +encrypted_fields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedLong', 'bsonType': 'long', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberInt': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberLong': '0'}, 'max': {'$numberLong': '200'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "FLE2 Range Long. Update." 
@@ -137,12 +135,6 @@ tests: "_id": 0, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -198,12 +190,6 @@ tests: "_id": 1, "encryptedLong": { $$type: "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-WrongType.json b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-WrongType.json similarity index 95% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-WrongType.json rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-WrongType.json index 704a693b8f..6215604508 100644 --- a/source/client-side-encryption/tests/legacy/fle2v2-Range-WrongType.json +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-WrongType.json @@ -1,13 +1,13 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" ], - "maxServerVersion": "7.99.99" + "maxServerVersion": "8.99.99" } ], "database_name": "default", @@ -25,10 +25,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberLong": "1" + }, "sparsity": { "$numberLong": "1" }, diff --git a/source/client-side-encryption/tests/legacy/fle2v2-Range-WrongType.yml b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-WrongType.yml similarity index 88% rename from source/client-side-encryption/tests/legacy/fle2v2-Range-WrongType.yml rename to source/client-side-encryption/tests/legacy/fle2v2-Rangev2-WrongType.yml index 9f1a93386a..432f86b423 100644 --- 
a/source/client-side-encryption/tests/legacy/fle2v2-Range-WrongType.yml +++ b/source/client-side-encryption/tests/legacy/fle2v2-Rangev2-WrongType.yml @@ -3,16 +3,15 @@ # Requires libmongocrypt 1.8.0. runOn: - - minServerVersion: "7.0.0" + - minServerVersion: "8.0.0" # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] - # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". - maxServerVersion: "7.99.99" + maxServerVersion: "8.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] -encrypted_fields: &encrypted_fields { 'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'rangePreview', 'contention': {'$numberLong': '0'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} +encrypted_fields: &encrypted_fields { 'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedInt', 'bsonType': 'int', 'queries': {'queryType': 'range', 'contention': {'$numberLong': '0'}, 'trimFactor': {'$numberLong': '1'}, 'sparsity': {'$numberLong': '1'}, 'min': {'$numberInt': '0'}, 'max': {'$numberInt': '200'}}}]} key_vault_data: [ {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': 
'0'}, 'masterKey': {'provider': 'local'}} ] tests: - description: "Wrong type: Insert Double" diff --git a/source/client-side-encryption/tests/legacy/kmipKMS.json b/source/client-side-encryption/tests/legacy/kmipKMS.json index b0ad3e5cbb..349328b433 100644 --- a/source/client-side-encryption/tests/legacy/kmipKMS.json +++ b/source/client-side-encryption/tests/legacy/kmipKMS.json @@ -294,17 +294,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - } - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/source/client-side-encryption/tests/legacy/kmipKMS.yml b/source/client-side-encryption/tests/legacy/kmipKMS.yml index 21b1285ab1..51fa42cc73 100644 --- a/source/client-side-encryption/tests/legacy/kmipKMS.yml +++ b/source/client-side-encryption/tests/legacy/kmipKMS.yml @@ -55,18 +55,12 @@ tests: arguments: document: &doc1 { _id: 1, encrypted_string_kmip_delegated: "string0" } expectations: - - command_started_event: - command: - listCollections: 1 - filter: - name: *collection_name - command_name: listCollections # Auto encryption will request the collection info. - command_started_event: command: listCollections: 1 filter: - name: datakeys + name: *collection_name command_name: listCollections # Then key is fetched from the key vault. 
- command_started_event: diff --git a/source/client-side-encryption/tests/legacy/timeoutMS.json b/source/client-side-encryption/tests/legacy/timeoutMS.json index 443aa0aa23..b667767cfc 100644 --- a/source/client-side-encryption/tests/legacy/timeoutMS.json +++ b/source/client-side-encryption/tests/legacy/timeoutMS.json @@ -161,7 +161,7 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 3 + "times": 2 }, "data": { "failCommands": [ @@ -169,7 +169,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 20 + "blockTimeMS": 30 } }, "clientOptions": { diff --git a/source/client-side-encryption/tests/legacy/timeoutMS.yml b/source/client-side-encryption/tests/legacy/timeoutMS.yml index 33321ad64d..bb71d67650 100644 --- a/source/client-side-encryption/tests/legacy/timeoutMS.yml +++ b/source/client-side-encryption/tests/legacy/timeoutMS.yml @@ -38,8 +38,10 @@ tests: command_name: listCollections # Test that timeoutMS applies to the sum of all operations done for client-side encryption. This is done by blocking - # listCollections and find for 20ms each and running an insertOne with timeoutMS=50. There should be two - # listCollections commands and one "find" command, so the sum should take more than timeoutMS. + # listCollections and find for 30ms each and running an insertOne with timeoutMS=50. There should be one + # listCollections command and one "find" command, so the sum should take more than timeoutMS. A second listCollections + # event doesn't occur due to the internal MongoClient lacking configured auto encryption, plus libmongocrypt holds the + # collection schema in cache for a minute. # # This test does not include command monitoring expectations because the exact command sequence is dependent on the # amount of time taken by mongocryptd communication. 
In slow runs, mongocryptd communication can breach the timeout @@ -47,11 +49,11 @@ tests: - description: "remaining timeoutMS applied to find to get keyvault data" failPoint: configureFailPoint: failCommand - mode: { times: 3 } + mode: { times: 2 } data: failCommands: ["listCollections", "find"] blockConnection: true - blockTimeMS: 20 + blockTimeMS: 30 clientOptions: autoEncryptOpts: kmsProviders: diff --git a/source/client-side-operations-timeout/client-side-operations-timeout.md b/source/client-side-operations-timeout/client-side-operations-timeout.md index 83bbcd6d30..a884730acd 100644 --- a/source/client-side-operations-timeout/client-side-operations-timeout.md +++ b/source/client-side-operations-timeout/client-side-operations-timeout.md @@ -209,7 +209,7 @@ See [serverSelectionTimeoutMS is not deprecated](#serverselectiontimeoutms-is-no If `timeoutMS` is set, drivers MUST append a `maxTimeMS` field to commands executed against a MongoDB server using the `minRoundTripTime` field of the selected server. Note that this value MUST be retrieved during server selection using the `servers` field of the same -[TopologyDescription](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#TopologyDescription) that +[TopologyDescription](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#TopologyDescription) that was used for selection before the selected server's description can be modified. Otherwise, drivers may be subject to a race condition where a server is reset to the default description (e.g. due to an error in the monitoring thread) after it has been selected but before the RTT is retrieved. @@ -363,8 +363,8 @@ See [Change stream behavior](#change-stream-behavior). 
### Sessions -The [SessionOptions](../sessions/driver-sessions.rst#mongoclient-changes) used to construct explicit -[ClientSession](../sessions/driver-sessions.rst#clientsession) instances MUST accept a new `defaultTimeoutMS` option, +The [SessionOptions](../sessions/driver-sessions.md#mongoclient-changes) used to construct explicit +[ClientSession](../sessions/driver-sessions.md#clientsession) instances MUST accept a new `defaultTimeoutMS` option, which specifies the `timeoutMS` value for the following operations executed on the session: 1. commitTransaction diff --git a/source/client-side-operations-timeout/tests/README.md b/source/client-side-operations-timeout/tests/README.md index b4160500f5..a960c2de21 100644 --- a/source/client-side-operations-timeout/tests/README.md +++ b/source/client-side-operations-timeout/tests/README.md @@ -24,7 +24,7 @@ test MUST be unset using `internalClient` after the test has been executed. All MUST be configured with read/write concern `majority`, read preference `primary`, and command monitoring enabled to listen for `command_started` events. -### 1. Multi-batch writes +### 1. Multi-batch inserts This test MUST only run against standalones on server versions 4.4 and higher. The `insertMany` call takes an exceedingly long time on replicasets and sharded clusters. Drivers MAY adjust the timeouts used in this test to allow @@ -598,6 +598,49 @@ Tests in this section MUST only run against replica sets and sharded clusters wi 1. `command_started` and `command_failed` events for an `insert` command. 2. `command_started` and `command_failed` events for an `abortTransaction` command. +### 11. Multi-batch bulkWrites + +This test MUST only run against server versions 8.0+. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. 
Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["bulkWrite"], + blockConnection: true, + blockTimeMS: 1010 + } + } + ``` + +3. Using `internalClient`, perform a `hello` command and record the `maxBsonObjectSize` and `maxMessageSizeBytes` values + in the response. + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + +5. Create a list of write models (referred to as `models`) with the following write model repeated + (`maxMessageSizeBytes / maxBsonObjectSize + 1`) times: + + ```json + InsertOne { + "namespace": "db.coll", + "document": { "a": "b".repeat(maxBsonObjectSize - 500) } + } + ``` + +6. Call `bulkWrite` on `client` with `models`. + + - Expect this to fail with a timeout error. + +7. Verify that two `bulkWrite` commands were executed as part of the `MongoClient.bulkWrite` call. + ## Unit Tests The tests enumerated in this section could not be expressed in either spec or prose format. 
Drivers SHOULD implement diff --git a/source/client-side-operations-timeout/tests/close-cursors.json b/source/client-side-operations-timeout/tests/close-cursors.json index a8b2d724fa..79b0de7b6a 100644 --- a/source/client-side-operations-timeout/tests/close-cursors.json +++ b/source/client-side-operations-timeout/tests/close-cursors.json @@ -75,7 +75,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 200 + "blockTimeMS": 250 } } } @@ -175,7 +175,7 @@ "killCursors" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 250 } } } @@ -186,7 +186,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -194,7 +194,7 @@ "name": "close", "object": "cursor", "arguments": { - "timeoutMS": 40 + "timeoutMS": 400 } } ], diff --git a/source/command-logging-and-monitoring/tests/logging/unacknowledged-write.json b/source/command-logging-and-monitoring/tests/logging/unacknowledged-write.json index dad0c0a36a..0d33c020d5 100644 --- a/source/command-logging-and-monitoring/tests/logging/unacknowledged-write.json +++ b/source/command-logging-and-monitoring/tests/logging/unacknowledged-write.json @@ -5,6 +5,7 @@ { "client": { "id": "client", + "useMultipleMongoses": false, "observeLogMessages": { "command": "debug" } diff --git a/source/command-logging-and-monitoring/tests/logging/unacknowledged-write.yml b/source/command-logging-and-monitoring/tests/logging/unacknowledged-write.yml index 16ee0e3cc6..6409fb408c 100644 --- a/source/command-logging-and-monitoring/tests/logging/unacknowledged-write.yml +++ b/source/command-logging-and-monitoring/tests/logging/unacknowledged-write.yml @@ -5,6 +5,7 @@ schemaVersion: "1.16" createEntities: - client: id: &client client + useMultipleMongoses: false observeLogMessages: command: debug - database: diff --git a/source/command-logging-and-monitoring/tests/monitoring/unacknowledged-client-bulkWrite.json 
b/source/command-logging-and-monitoring/tests/monitoring/unacknowledged-client-bulkWrite.json new file mode 100644 index 0000000000..1099b6a1e9 --- /dev/null +++ b/source/command-logging-and-monitoring/tests/monitoring/unacknowledged-client-bulkWrite.json @@ -0,0 +1,218 @@ +{ + "description": "unacknowledged-client-bulkWrite", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ], + "uriOptions": { + "w": 0 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "command-monitoring-tests.test" + }, + "tests": [ + { + "description": "A successful mixed client bulkWrite", + "operations": [ + { + "object": "client", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "command-monitoring-tests.test", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "command-monitoring-tests.test", + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 333 + } + } + } + } + ] + }, + "expectResult": { + "insertedCount": { + "$$unsetOrMatches": 0 + }, + "upsertedCount": { + "$$unsetOrMatches": 0 + }, + "matchedCount": { + "$$unsetOrMatches": 0 + }, + "modifiedCount": { + "$$unsetOrMatches": 0 + }, + "deletedCount": { + "$$unsetOrMatches": 0 + }, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + 
"deleteResults": { + "$$unsetOrMatches": {} + } + } + }, + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 333 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 3 + }, + "updateMods": { + "$set": { + "x": 333 + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "command-monitoring-tests.test" + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite", + "reply": { + "ok": 1, + "nInserted": { + "$$exists": false + }, + "nMatched": { + "$$exists": false + }, + "nModified": { + "$$exists": false + }, + "nUpserted": { + "$$exists": false + }, + "nDeleted": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/source/command-logging-and-monitoring/tests/monitoring/unacknowledged-client-bulkWrite.yml b/source/command-logging-and-monitoring/tests/monitoring/unacknowledged-client-bulkWrite.yml new file mode 100644 index 0000000000..fcc6b7b3ec --- /dev/null +++ b/source/command-logging-and-monitoring/tests/monitoring/unacknowledged-client-bulkWrite.yml @@ -0,0 +1,109 @@ +description: "unacknowledged-client-bulkWrite" + +schemaVersion: "1.7" + +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - commandSucceededEvent + - commandFailedEvent + uriOptions: + w: 0 + - database: + id: &database database + client: *client + databaseName: &databaseName command-monitoring-tests + - collection: + id: &collection 
collection + database: *database + collectionName: &collectionName test + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "command-monitoring-tests.test" + +tests: + - description: 'A successful mixed client bulkWrite' + operations: + - object: *client + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + - updateOne: + namespace: *namespace + filter: { _id: 3 } + update: { $set: { x: 333 } } + expectResult: + insertedCount: + $$unsetOrMatches: 0 + upsertedCount: + $$unsetOrMatches: 0 + matchedCount: + $$unsetOrMatches: 0 + modifiedCount: + $$unsetOrMatches: 0 + deletedCount: + $$unsetOrMatches: 0 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + # Force completion of the w:0 write by executing a find on the same connection + - object: *collection + name: find + arguments: + filter: {} + expectResult: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 333 } + - { _id: 4, x: 44 } + + expectEvents: + - + client: *client + ignoreExtraEvents: true + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 3 } + updateMods: { $set: { x: 333 } } + multi: false + nsInfo: + - ns: *namespace + - commandSucceededEvent: + commandName: bulkWrite + reply: + ok: 1 + nInserted: { $$exists: false } + nMatched: { $$exists: false } + nModified: { $$exists: false } + nUpserted: { $$exists: false } + nDeleted: { $$exists: false } diff --git a/source/command-logging-and-monitoring/tests/monitoring/unacknowledgedBulkWrite.json b/source/command-logging-and-monitoring/tests/monitoring/unacknowledgedBulkWrite.json index 
782cb84a5b..78ddde767f 100644 --- a/source/command-logging-and-monitoring/tests/monitoring/unacknowledgedBulkWrite.json +++ b/source/command-logging-and-monitoring/tests/monitoring/unacknowledgedBulkWrite.json @@ -5,6 +5,7 @@ { "client": { "id": "client", + "useMultipleMongoses": false, "observeEvents": [ "commandStartedEvent", "commandSucceededEvent", @@ -70,17 +71,7 @@ "object": "collection", "arguments": { "filter": {} - }, - "expectResult": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": "unorderedBulkWriteInsertW0", - "x": 44 - } - ] + } } ], "expectEvents": [ diff --git a/source/command-logging-and-monitoring/tests/monitoring/unacknowledgedBulkWrite.yml b/source/command-logging-and-monitoring/tests/monitoring/unacknowledgedBulkWrite.yml index e512b2eb56..c526fab325 100644 --- a/source/command-logging-and-monitoring/tests/monitoring/unacknowledgedBulkWrite.yml +++ b/source/command-logging-and-monitoring/tests/monitoring/unacknowledgedBulkWrite.yml @@ -5,6 +5,7 @@ schemaVersion: "1.7" createEntities: - client: id: &client client + useMultipleMongoses: false observeEvents: - commandStartedEvent - commandSucceededEvent @@ -41,10 +42,6 @@ tests: object: *collection arguments: filter: { } - expectResult: [ - { _id: 1, x: 11 }, - { _id: "unorderedBulkWriteInsertW0", x: 44 } - ] expectEvents: - client: *client ignoreExtraEvents: true diff --git a/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.md b/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.md index 00ca2899ff..7fdd4109aa 100644 --- a/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.md +++ b/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.md @@ -380,13 +380,13 @@ interface ConnectionPool { This specification does not define how a pool is to be created, leaving it up to the driver. 
Creation of a connection pool is generally an implementation detail of the driver, i.e., is not a part of the public API of the driver. The SDAM specification defines -[when](https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#connection-pool-creation) -the driver should create connection pools. +[when](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#connection-pool-creation) the driver should +create connection pools. When a pool is created, its state MUST initially be set to "paused". Even if minPoolSize is set, the pool MUST NOT begin being [populated](#populating-the-pool-with-a-connection-internal-implementation) with [Connections](#connection) until it has been marked as "ready". SDAM will mark the pool as "ready" on each successful check. See -[Connection Pool Management](/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#connection-pool-management) +[Connection Pool Management](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#connection-pool-management) section in the SDAM specification for more information. ``` @@ -508,8 +508,8 @@ Populating the pool MUST NOT block any application threads. For example, it coul via the use of non-blocking/async I/O. Populating the pool MUST NOT be performed unless the pool is "ready". If an error is encountered while populating a connection, it MUST be handled via the SDAM machinery according to the -[Application Errors](/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#application-errors) -section in the SDAM specification. +[Application Errors](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#application-errors) section +in the SDAM specification. If minPoolSize is set, the [Connection](#connection) Pool MUST be populated until it has at least minPoolSize total [Connections](#connection). 
This MUST occur only while the pool is "ready". If the pool implements a background thread, @@ -1265,8 +1265,8 @@ longer full, it is immediately filled. It is not a favorable situation to be in, guarantee that the waitQueue normally provides. Because of these issues, it does not make sense to -[go against driver mantras and provide an additional knob](../../README.md#). We may eventually pursue an alternative -configurations to address wait queue size in [Advanced Pooling Behaviors](#advanced-pooling-behaviors). +[go against driver mantras and provide an additional knob](../driver-mantras.md#). We may eventually pursue an +alternative configuration to address wait queue size in [Advanced Pooling Behaviors](#advanced-pooling-behaviors). Users that wish to have this functionality can achieve similar results by utilizing other methods to limit concurrency. Examples include implementing either a thread pool or an operation queue with a capped size in the user application. diff --git a/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.rst b/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.rst index 2b3b3a0a90..18d7f87d9f 100644 --- a/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.rst +++ b/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.rst @@ -1,3 +1,3 @@ .. note:: This specification has been converted to Markdown and renamed to - `connection-monitoring-and-pooling.md `_. + `connection-monitoring-and-pooling.md `_. diff --git a/source/connection-string/connection-string-spec.md b/source/connection-string/connection-string-spec.md index 5d051f4e47..5ee78434a4 100644 --- a/source/connection-string/connection-string-spec.md +++ b/source/connection-string/connection-string-spec.md @@ -216,13 +216,26 @@ The values in connection options MUST be URL decoded by the parser. The values c ``` - Key value pairs: A value that represents one or more key and value pairs. 
Multiple key value pairs are delimited by a - comma (","). The key is everything up to the first colon sign (":") and the value is everything afterwards. If any - keys or values containing a comma (",") or a colon (":") they must be URL encoded. For example: + comma (","). The key is everything up to the first colon sign (":") and the value is everything afterwards. + + For example: ``` ?readPreferenceTags=dc:ny,rack:1 ``` + Drivers MUST handle unencoded colon signs (":") within the value. For example, given the connection string option: + + ``` + authMechanismProperties=TOKEN_RESOURCE:mongodb://foo + ``` + + the driver MUST interpret the key as `TOKEN_RESOURCE` and the value as `mongodb://foo`. + + For any option key-value pair that may contain a comma (such as `TOKEN_RESOURCE`), drivers MUST document that: a value + containing a comma (",") MUST NOT be provided as part of the connection string. This prevents use of values that would + interfere with parsing. + Any invalid Values for a given key MUST be ignored and MUST log a WARN level message. For example: ``` @@ -232,8 +245,7 @@ Unsupported value for "fsync" : "ifPossible" ### Repeated Keys If a key is repeated and the corresponding data type is not a List then the precedence of which key value pair will be -used is undefined except where defined otherwise by the -[URI options spec](https://github.com/mongodb/specifications/blob/master/source/uri-options/uri-options.rst). +used is undefined except where defined otherwise by the [URI options spec](../uri-options/uri-options.md). Where possible, a warning SHOULD be raised to inform the user that multiple options were found for the same value. @@ -445,6 +457,8 @@ many languages treat strings as `x-www-form-urlencoded` data by default. ## Changelog +- 2024-05-29: Clarify handling of key-value pairs and add specification test. + - 2024-02-15: Migrated from reStructuredText to Markdown. - 2016-07-22: In Port section, clarify that zero is not an acceptable port. 
diff --git a/source/connection-string/tests/invalid-uris.yml b/source/connection-string/tests/invalid-uris.yml index 79e110c79d..dd4d4ce31c 100644 --- a/source/connection-string/tests/invalid-uris.yml +++ b/source/connection-string/tests/invalid-uris.yml @@ -249,5 +249,3 @@ tests: hosts: ~ auth: ~ options: ~ - - diff --git a/source/connection-string/tests/valid-auth.json b/source/connection-string/tests/valid-auth.json index 176a54a096..60f63f4e3f 100644 --- a/source/connection-string/tests/valid-auth.json +++ b/source/connection-string/tests/valid-auth.json @@ -220,29 +220,8 @@ "options": null }, { - "description": "Escaped user info and database (MONGODB-CR)", - "uri": "mongodb://%24am:f%3Azzb%40z%2Fz%3D@127.0.0.1/admin%3F?authMechanism=MONGODB-CR", - "valid": true, - "warning": false, - "hosts": [ - { - "type": "ipv4", - "host": "127.0.0.1", - "port": null - } - ], - "auth": { - "username": "$am", - "password": "f:zzb@z/z=", - "db": "admin?" - }, - "options": { - "authmechanism": "MONGODB-CR" - } - }, - { - "description": "Subdelimiters in user/pass don't need escaping (MONGODB-CR)", - "uri": "mongodb://!$&'()*+,;=:!$&'()*+,;=@127.0.0.1/admin?authMechanism=MONGODB-CR", + "description": "Subdelimiters in user/pass don't need escaping (PLAIN)", + "uri": "mongodb://!$&'()*+,;=:!$&'()*+,;=@127.0.0.1/admin?authMechanism=PLAIN", "valid": true, "warning": false, "hosts": [ @@ -258,7 +237,7 @@ "db": "admin" }, "options": { - "authmechanism": "MONGODB-CR" + "authmechanism": "PLAIN" } }, { diff --git a/source/connection-string/tests/valid-auth.yml b/source/connection-string/tests/valid-auth.yml index f40c748fa6..02ed287428 100644 --- a/source/connection-string/tests/valid-auth.yml +++ b/source/connection-string/tests/valid-auth.yml @@ -173,24 +173,8 @@ tests: db: "my=db" options: ~ - - description: "Escaped user info and database (MONGODB-CR)" - uri: "mongodb://%24am:f%3Azzb%40z%2Fz%3D@127.0.0.1/admin%3F?authMechanism=MONGODB-CR" - valid: true - warning: false - hosts: - 
- - type: "ipv4" - host: "127.0.0.1" - port: ~ - auth: - username: "$am" - password: "f:zzb@z/z=" - db: "admin?" - options: - authmechanism: "MONGODB-CR" - - - description: "Subdelimiters in user/pass don't need escaping (MONGODB-CR)" - uri: "mongodb://!$&'()*+,;=:!$&'()*+,;=@127.0.0.1/admin?authMechanism=MONGODB-CR" + description: "Subdelimiters in user/pass don't need escaping (PLAIN)" + uri: "mongodb://!$&'()*+,;=:!$&'()*+,;=@127.0.0.1/admin?authMechanism=PLAIN" valid: true warning: false hosts: @@ -203,7 +187,7 @@ tests: password: "!$&'()*+,;=" db: "admin" options: - authmechanism: "MONGODB-CR" + authmechanism: "PLAIN" - description: "Escaped username (MONGODB-X509)" uri: "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509" diff --git a/source/connection-string/tests/valid-options.json b/source/connection-string/tests/valid-options.json index 01bc2264bb..6c86172d08 100644 --- a/source/connection-string/tests/valid-options.json +++ b/source/connection-string/tests/valid-options.json @@ -2,7 +2,7 @@ "tests": [ { "description": "Option names are normalized to lowercase", - "uri": "mongodb://alice:secret@example.com/admin?AUTHMechanism=MONGODB-CR", + "uri": "mongodb://alice:secret@example.com/admin?AUTHMechanism=PLAIN", "valid": true, "warning": false, "hosts": [ @@ -18,7 +18,7 @@ "db": "admin" }, "options": { - "authmechanism": "MONGODB-CR" + "authmechanism": "PLAIN" } }, { @@ -37,6 +37,25 @@ "options": { "tls": true } + }, + { + "description": "Colon in a key value pair", + "uri": "mongodb://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://test-cluster", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": null, + "options": { + "authmechanismProperties": { + "TOKEN_RESOURCE": "mongodb://test-cluster" + } + } } ] } diff --git 
a/source/connection-string/tests/valid-options.yml b/source/connection-string/tests/valid-options.yml index 8cb0dea3a6..86523c7f39 100644 --- a/source/connection-string/tests/valid-options.yml +++ b/source/connection-string/tests/valid-options.yml @@ -1,7 +1,7 @@ tests: - description: "Option names are normalized to lowercase" - uri: "mongodb://alice:secret@example.com/admin?AUTHMechanism=MONGODB-CR" + uri: "mongodb://alice:secret@example.com/admin?AUTHMechanism=PLAIN" valid: true warning: false hosts: @@ -14,7 +14,7 @@ tests: password: "secret" db: "admin" options: - authmechanism: "MONGODB-CR" + authmechanism: "PLAIN" - description: "Missing delimiting slash between hosts and options" uri: "mongodb://example.com?tls=true" @@ -28,3 +28,17 @@ tests: auth: ~ options: tls: true + - + description: Colon in a key value pair + uri: mongodb://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://test-cluster + valid: true + warning: false + hosts: + - + type: hostname + host: example.com + port: ~ + auth: ~ + options: + authmechanismProperties: + TOKEN_RESOURCE: 'mongodb://test-cluster' \ No newline at end of file diff --git a/source/connection-string/tests/valid-warnings.json b/source/connection-string/tests/valid-warnings.json index 1eacbf8fcb..f0e8288bc7 100644 --- a/source/connection-string/tests/valid-warnings.json +++ b/source/connection-string/tests/valid-warnings.json @@ -93,6 +93,21 @@ ], "auth": null, "options": null + }, + { + "description": "Comma in a key value pair causes a warning", + "uri": "mongodb://localhost?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://host1%2Chost2", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": null, + "options": null } ] } diff --git a/source/connection-string/tests/valid-warnings.yml b/source/connection-string/tests/valid-warnings.yml index ea9cc9d1ed..bdc64c5916 100644 --- 
a/source/connection-string/tests/valid-warnings.yml +++ b/source/connection-string/tests/valid-warnings.yml @@ -73,3 +73,15 @@ tests: port: ~ auth: ~ options: ~ + - + description: Comma in a key value pair causes a warning + uri: mongodb://localhost?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://host1%2Chost2 + valid: true + warning: true + hosts: + - + type: "hostname" + host: "localhost" + port: ~ + auth: ~ + options: ~ diff --git a/source/crud/bulk-write.md b/source/crud/bulk-write.md new file mode 100644 index 0000000000..153800b47a --- /dev/null +++ b/source/crud/bulk-write.md @@ -0,0 +1,857 @@ +# Bulk Write + +- Status: Accepted +- Minimum Server Version: 8.0 + +## Abstract + +This specification defines the driver API for the `bulkWrite` server command introduced in MongoDB 8.0. The API defined +in this specification allows users to perform insert, update, and delete operations against mixed namespaces in a +minimized number of round trips, and to receive detailed results for each operation performed. This API is distinct from +the [collection-level bulkWrite method](../crud/crud.md#insert-update-replace-delete-and-bulk-writes) defined in the +CRUD specification and the [deprecated bulk write specification](../driver-bulk-update.rst). + +## Specification + +> [!NOTE] +> The `BulkWriteOptions`, `BulkWriteResult`, and `BulkWriteException` types defined in this specification are similar to +> those used for the `MongoCollection.bulkWrite` method. Statically typed drivers MUST NOT reuse their existing +> definitions for these types for the `MongoClient.bulkWrite` API and MUST introduce new types. If naming conflicts +> arise, drivers SHOULD prepend "Client" to the new type names (e.g. `ClientBulkWriteOptions`). + +### `MongoClient.bulkWrite` Interface + +```typescript +interface MongoClient { + /** + * Executes a list of mixed write operations. 
+ * + * @throws BulkWriteException + */ + bulkWrite(models: NamespaceWriteModelPair[], options: Optional): BulkWriteResult; +} +``` + +### Write Models + +A `WriteModel` defines a single write operation to be performed as part of a bulk write. + +```typescript +/** + * Unifying interface for the various write model types. Drivers may also use an enum with + * variants for each write model for this type. + */ +interface WriteModel {} + +class InsertOneModel implements WriteModel { + /** + * The document to insert. + */ + document: Document; +} + +class UpdateOneModel implements WriteModel { + /** + * The filter to apply. + */ + filter: Document; + + /** + * The update document or pipeline to apply to the selected document. + */ + update: (Document | Document[]); + + /** + * A set of filters specifying to which array elements an update should apply. + * + * This option is sent only if the caller explicitly provides a value. + */ + arrayFilters: Optional; + + /** + * Specifies a collation. + * + * This option is sent only if the caller explicitly provides a value. + */ + collation: Optional; + + /** + * The index to use. Specify either the index name as a string or the index key pattern. If + * specified, then the query system will only consider plans using the hinted index. + * + * This option is only sent if the caller explicitly provides a value. + */ + hint: Optional; + + /** + * When true, creates a new document if no document matches the query. + * + * This option is only sent if the caller explicitly provides a value. The server's default + * value is false. + */ + upsert: Optional; +} + +class UpdateManyModel implements WriteModel { + /** + * The filter to apply. + */ + filter: Document; + + /** + * The update document or pipeline to apply to the selected documents. + */ + update: (Document | Document[]); + + /** + * A set of filters specifying to which array elements an update should apply. 
+ * + * This option is sent only if the caller explicitly provides a value. + */ + arrayFilters: Optional; + + /** + * Specifies a collation. + * + * This option is sent only if the caller explicitly provides a value. + */ + collation: Optional; + + /** + * The index to use. Specify either the index name as a string or the index key pattern. If + * specified, then the query system will only consider plans using the hinted index. + * + * This option is only sent if the caller explicitly provides a value. + */ + hint: Optional; + + /** + * When true, creates a new document if no document matches the query. + * + * This option is only sent if the caller explicitly provides a value. The server's default + * value is false. + */ + upsert: Optional; +} + +class ReplaceOneModel implements WriteModel { + /** + * The filter to apply. + */ + filter: Document; + + /** + * The replacement document. + */ + replacement: Document; + + /** + * Specifies a collation. + * + * This option is sent only if the caller explicitly provides a value. + */ + collation: Optional; + + /** + * The index to use. Specify either the index name as a string or the index key pattern. If + * specified, then the query system will only consider plans using the hinted index. + * + * This option is only sent if the caller explicitly provides a value. + */ + hint: Optional; + + /** + * When true, creates a new document if no document matches the query. + * + * This option is only sent if the caller explicitly provides a value. The server's default + * value is false. + */ + upsert: Optional; +} + +class DeleteOneModel implements WriteModel { + /** + * The filter to apply. + */ + filter: Document; + + /** + * Specifies a collation. + * + * This option is sent only if the caller explicitly provides a value. + */ + collation: Optional; + + /** + * The index to use. Specify either the index name as a string or the index key pattern. 
If + * specified, then the query system will only consider plans using the hinted index. + * + * This option is only sent if the caller explicitly provides a value. + */ + hint: Optional; +} + +class DeleteManyModel implements WriteModel { + /** + * The filter to apply. + */ + filter: Document; + + /** + * Specifies a collation. + * + * This option is sent only if the caller explicitly provides a value. + */ + collation: Optional; + + /** + * The index to use. Specify either the index name as a string or the index key pattern. If + * specified, then the query system will only consider plans using the hinted index. + * + * This option is only sent if the caller explicitly provides a value. + */ + hint: Optional; +} +``` + +Each write model provided to `MongoClient.bulkWrite` in the `models` parameter MUST have a corresponding namespace that +defines the collection on which the operation should be performed. Drivers SHOULD design this pairing in whichever way +is most idiomatic for its language. For example, drivers may: + +- Include a required `namespace` field on each `WriteModel` variant and accept a list of `WriteModel` objects for the + `models` parameter. +- Accept a list of `(Namespace, WriteModel)` tuples for `models`. +- Define the following pair class: + +```typescript +class NamespaceWriteModelPair { + /** + * The namespace on which to perform the write. + */ + namespace: Namespace; + + /** + * The write to perform. + */ + model: WriteModel; +} +``` + +Drivers MUST throw an exception if the list provided for `models` is empty. + +#### Update vs. replace document validation + +Update documents provided in `UpdateOne` and `UpdateMany` write models are required only to contain atomic modifiers +(i.e. keys that start with "$"). Drivers MUST throw an error if an update document is empty or if the document's first +key does not start with "$". 
Drivers MUST rely on the server to return an error if any other entries in the update +document are not atomic modifiers. Drivers are not required to perform validation on update pipelines. + +Replacement documents provided in `ReplaceOne` write models are required not to contain atomic modifiers. Drivers MUST +throw an error if a replacement document is nonempty and its first key starts with "$". Drivers MUST rely on the server +to return an error if any other entries in the replacement document are atomic modifiers. + +### Options + +```typescript +class BulkWriteOptions { + /** + * Whether the operations in this bulk write should be executed in the order in which they were + * specified. If false, writes will continue to be executed if an individual write fails. If + * true, writes will stop executing if an individual write fails. + * + * Defaults to true. + */ + ordered: Optional; + + /** + * If true, allows the writes to opt out of document-level validation. + * + * This option is only sent if the caller explicitly provides a value. The server's default + * value is false. + */ + bypassDocumentValidation: Optional; + + /** + * A map of parameter names and values to apply to all operations within the bulk write. Value + * must be constant or closed expressions that do not reference document fields. Parameters can + * then be accessed as variables in an aggregate expression context (e.g. "$$var"). + * + * This option is only sent if the caller explicitly provides a value. + */ + let: Optional; + + /** + * The write concern to use for this bulk write. + */ + writeConcern: Optional; + + /** + * Enables users to specify an arbitrary comment to help trace the operation through + * the database profiler, currentOp and logs. + * + * This option is only sent if the caller explicitly provides a value. + */ + comment: Optional; + + /** + * Whether detailed results for each successful operation should be included in the returned + * BulkWriteResult. 
+ * + * Defaults to false. This value corresponds inversely to the errorsOnly field in the bulkWrite + * command. + */ + verboseResults: Optional; +} +``` + +### Result + +```typescript +class BulkWriteResult { + /** + * Indicates whether this write result was acknowledged. If not, then all other members of this + * result will be undefined. + * + * NOT REQUIRED TO IMPLEMENT. See below for more guidance on modeling unacknowledged results. + */ + acknowledged: Boolean; + + /** + * Indicates whether the results are verbose. If false, the insertResults, updateResults, and + * deleteResults fields in this result will be undefined. + * + * NOT REQUIRED TO IMPLEMENT. See below for other ways to differentiate summary results from + * verbose results. + */ + hasVerboseResults: Boolean; + + /** + * The total number of documents inserted across all insert operations. + */ + insertedCount: Int64; + + /** + * The total number of documents upserted across all update operations. + */ + upsertedCount: Int64; + + /** + * The total number of documents matched across all update operations. + */ + matchedCount: Int64; + + /** + * The total number of documents modified across all update operations. + */ + modifiedCount: Int64; + + /** + * The total number of documents deleted across all delete operations. + */ + deletedCount: Int64; + + /** + * The results of each individual insert operation that was successfully performed. + */ + insertResults: Map; + + /** + * The results of each individual update operation that was successfully performed. + */ + updateResults: Map; + + /** + * The results of each individual delete operation that was successfully performed. + */ + deleteResults: Map; +} + +class InsertOneResult { + /** + * The _id of the inserted document. + */ + insertedId: Any; +} + +class UpdateResult { + /** + * The number of documents that matched the filter. + */ + matchedCount: Int64; + + /** + * The number of documents that were modified. 
+ */ + modifiedCount: Int64; + + /** + * The _id field of the upserted document if an upsert occurred. + * + * It MUST be possible to discern between a BSON Null upserted ID value and this field being + * unset. If necessary, drivers MAY add a didUpsert boolean field to differentiate between + * these two cases. + */ + upsertedId: Optional; +} + +class DeleteResult { + /** + * The number of documents that were deleted. + */ + deletedCount: Int64; +} +``` + +#### Unacknowledged results + +`BulkWriteResult` has an optional `acknowledged` field to indicate whether the result was acknowledged. This is not +required to implement. Drivers should follow the guidance in the CRUD specification +[here](../crud/crud.md#write-results) to determine how to model unacknowledged results. + +#### Summary vs. verbose results + +Users MUST be able to discern whether a `BulkWriteResult` contains summary or verbose results without inspecting the +value provided for `verboseResults` in `BulkWriteOptions`. Drivers MUST implement this in one of the following ways: + +- Expose the `hasVerboseResults` field in `BulkWriteResult` as defined above. Document that `insertResults`, + `updateResults`, and `deleteResults` will be undefined when `hasVerboseResults` is false. Raise an error if a user + tries to access one of these fields when `hasVerboseResults` is false. +- Implement the `insertResults`, `updateResults`, and `deleteResults` fields as optional types and document that they + will be unset when `verboseResults` is false. +- Introduce separate `SummaryBulkWriteResult` and `VerboseBulkWriteResult` types. `VerboseBulkWriteResult` MUST have all + of the required fields defined on `BulkWriteResult` above. `SummaryBulkWriteResult` MUST have all of the required + fields defined on `BulkWriteResult` above except `insertResults`, `updateResults`, and `deleteResults`. 
+ +#### Individual results + +The `InsertOneResult`, `UpdateResult`, and `DeleteResult` classes are the same as or similar to types of the same name +defined in the [CRUD specification](crud.md). Drivers MUST redefine these classes if their existing result classes +deviate from the definitions in this specification (e.g. if they contain acknowledgement information, which is not +applicable for individual bulk write operations). Drivers MAY reuse their existing types for these classes if they match +the ones defined here exactly. + +### Exception + +```typescript +class BulkWriteException { + /** + * A top-level error that occurred when attempting to communicate with the server or execute + * the bulk write. This value may not be populated if the exception was thrown due to errors + * occurring on individual writes. + */ + error: Optional; + + /** + * Write concern errors that occurred while executing the bulk write. This list may have + * multiple items if more than one server command was required to execute the bulk write. + */ + writeConcernErrors: WriteConcernError[]; + + /** + * Errors that occurred during the execution of individual write operations. This map will + * contain at most one entry if the bulk write was ordered. + */ + writeErrors: Map; + + /** + * The results of any successful operations that were performed before the error was + * encountered. + */ + partialResult: Optional; +} +``` + +## Building a `bulkWrite` Command + +The `bulkWrite` server command has the following format: + +```javascript +{ + "bulkWrite": 1, + "ops": , + "nsInfo": , + "errorsOnly": Optional, + "ordered": Optional, + "bypassDocumentValidation": Optional, + "comment": Optional, + "let": Optional, + ...additional operation-agnostic fields +} +``` + +Drivers MUST use document sequences ([`OP_MSG`](../message/OP_MSG.rst) payload type 1) for the `ops` and `nsInfo` +fields. + +The `bulkWrite` command is executed on the "admin" database. 
+ +### Operations + +The `ops` field is a list of write operation documents. The first entry in each document has the name of the operation +(i.e. "insert", "update", or "delete") as its key and the index in the `nsInfo` array of the namespace on which the +operation should be performed as its value. The documents have the following format: + +#### Insert + +```javascript +{ + "insert": , + "document": +} +``` + +If the document to be inserted does not contain an `_id` field, drivers MUST generate a new +[`ObjectId`](../objectid.rst) and add it as the `_id` field at the beginning of the document. + +#### Update + +```javascript +{ + "update": , + "filter": , + "updateMods": , + "multi": Optional, + "upsert": Optional, + "arrayFilters": Optional, + "hint": Optional +} +``` + +#### Delete + +```javascript +{ + "delete": , + "filter": , + "multi": Optional, + "hint": Optional, + "collation": Optional +} +``` + +### Namespace Information + +The `nsInfo` field is an array containing the namespaces on which the write operations should be performed. Drivers MUST +NOT include duplicate namespaces in this list. The documents in the `nsInfo` array have the following format: + +```javascript +{ + "ns": +} +``` + +### `errorsOnly` and `verboseResults` + +The `errorsOnly` field indicates whether the results cursor returned in the `bulkWrite` response should contain only +errors and omit individual results. If false, both individual results for successful operations and errors will be +returned. This field is optional and defaults to false on the server. + +`errorsOnly` corresponds inversely to the `verboseResults` option defined on `BulkWriteOptions`. If the user specified a +value for `verboseResults`, drivers MUST define `errorsOnly` as the opposite of `verboseResults`. If the user did not +specify a value for `verboseResults`, drivers MUST define `errorsOnly` as `true`. 
+ +### `ordered` + +The `ordered` field defines whether writes should be executed in the order in which they were specified, and, if an +error occurs, whether the server should halt execution of further writes. It is optional and defaults to true on the +server. Drivers MUST explicitly define `ordered` as `true` in the `bulkWrite` command if a value is not specified in +`BulkWriteOptions`. This is required to avoid inconsistencies between server and driver behavior if the server default +changes in the future. + +### Size Limits + +The server reports a `maxBsonObjectSize` in its `hello` response. This value defines the maximum size for documents that +are inserted into the database. Documents that are sent to the server but are not intended to be inserted into the +database (e.g. command documents) have a size limit of `maxBsonObjectSize + 16KiB`. When an acknowledged write concern +is used, drivers MUST NOT perform any checks related to these size limits and MUST rely on the server to raise an error +if a limit is exceeded. However, when an unacknowledged write concern is used, drivers MUST raise an error if one of the +following limits is exceeded: + +- The size of a document to be inserted MUST NOT exceed `maxBsonObjectSize`. This applies to the `document` field of an + `InsertOneModel` and the `replacement` field of a `ReplaceOneModel`. +- The size of an entry in the `ops` array MUST NOT exceed `maxBsonObjectSize + 16KiB`. +- The size of the `bulkWrite` command document MUST NOT exceed `maxBsonObjectSize + 16KiB`. + +See [SERVER-10643](https://jira.mongodb.org/browse/SERVER-10643) for more details on these size limits. + +## Auto-Encryption + +If `MongoClient.bulkWrite` is called on a `MongoClient` configured with `AutoEncryptionOpts`, drivers MUST return an +error with the message: "bulkWrite does not currently support automatic encryption". + +This is expected to be removed once [DRIVERS-2888](https://jira.mongodb.org/browse/DRIVERS-2888) is implemented. 
+ +## Command Batching + +Drivers MUST accept an arbitrary number of operations as input to the `MongoClient.bulkWrite` method. Because the server +imposes restrictions on the size of write operations, this means that a single call to `MongoClient.bulkWrite` may +require multiple `bulkWrite` commands to be sent to the server. Drivers MUST split bulk writes into separate commands +when the user's list of operations exceeds one or more of these maximums: `maxWriteBatchSize`, `maxBsonObjectSize` (for +`OP_MSG` payload type 0), and `maxMessageSizeBytes` (for `OP_MSG` payload type 1). Each of these values can be retrieved +from the selected server's `hello` command response. Drivers MUST merge results from multiple batches into a single +`BulkWriteResult` or `BulkWriteException` to return from `MongoClient.bulkWrite`. + +When constructing the `nsInfo` array for a `bulkWrite` batch, drivers MUST only include the namespaces that are +referenced in the `ops` array for that batch. + +### Number of Writes + +`maxWriteBatchSize` defines the total number of writes allowed in one command. Drivers MUST split a bulk write into +multiple commands if the user provides more than `maxWriteBatchSize` operations in the argument for `models`. + +### Total Message Size + +Drivers MUST ensure that the total size of the `OP_MSG` built for each `bulkWrite` command does not exceed +`maxMessageSizeBytes`. + +The upper bound for the size of an `OP_MSG` includes opcode-related bytes (e.g. the `OP_MSG` header) and +operation-agnostic command field bytes (e.g. `txnNumber`, `lsid`). Drivers MUST limit the combined size of the +`bulkWrite` command document (excluding command-agnostic fields), `ops` document sequence, and `nsInfo` document +sequence to `maxMessageSizeBytes - 1000` to account for this overhead. 
The following pseudocode demonstrates how to +apply this limit in batch-splitting logic: + +``` +MESSAGE_OVERHEAD_BYTES = 1000 + +bulkWriteCommand = Document { "bulkWrite": 1 } +bulkWriteCommand.appendOptions(bulkWriteOptions) + +maxOpsNsInfoBytes = maxMessageSizeBytes - (MESSAGE_OVERHEAD_BYTES + bulkWriteCommand.numBytes()) + +while (writeModels.hasNext()) { + ops = DocumentSequence {} + nsInfo = DocumentSequence {} + while (true) { + if (!writeModels.hasNext()) { + break + } + model = writeModels.next() + + modelDoc = model.toOpsDoc() + bytesAdded = modelDoc.numBytes() + + nsInfoDoc = null + if (!nsInfo.contains(model.namespace)) { + nsInfoDoc = model.namespace.toNsInfoDoc() + bytesAdded += nsInfoDoc.numBytes() + } + + newSize = ops.numBytes() + nsInfo.numBytes() + bytesAdded + if (newSize > maxOpsNsInfoBytes) { + break + } else { + ops.push(modelDoc) + if (nsInfoDoc != null) { + nsInfo.push(nsInfoDoc) + } + } + } + + // construct and send OP_MSG +} +``` + +See [this Q&A entry](#how-was-the-op_msg-overhead-allowance-determined) for more details on how the overhead allowance +was determined. + +Drivers MUST return an error if there is not room to add at least one operation to `ops`. + +## Handling the `bulkWrite` Server Response + +The server's response to `bulkWrite` has the following format: + +```javascript +{ + "ok": <0 | 1>, + "cursor": { + "id": , + "firstBatch": , + "ns": + }, + "nErrors": , + "nInserted": , + "nUpserted": , + "nMatched": , + "nModified": , + "nDeleted": , + ...additional command-agnostic fields +} +``` + +If any operations were successful (i.e. `nErrors` is less than the number of operations that were sent), drivers MUST +record the summary count fields in a `BulkWriteResult` to be returned to the user or embedded in a `BulkWriteException`. +Drivers MUST NOT populate the `partialResult` field in `BulkWriteException` if no operations were successful. 
 + +Drivers MUST attempt to consume the contents of the cursor returned in the server's `bulkWrite` response before +returning to the user. This is required regardless of whether the user requested verbose or summary results, as the +results cursor always contains any write errors that occurred. If the cursor contains a nonzero cursor ID, drivers MUST +perform `getMore` until the cursor has been exhausted. Drivers MUST use the same session used for the `bulkWrite` +command for each `getMore` call. When connected to a load balancer, drivers MUST use the connection used for the +`bulkWrite` command to create the cursor to ensure the same server is targeted. + +The documents in the results cursor have the following format: + +```javascript +{ + "ok": <0 | 1>, + "idx": Int32, + "code": Optional, + "errmsg": Optional, + "errInfo": Optional, + "n": , + "nModified": Optional, + "upserted": Optional +} +``` + +If an error occurred (i.e. the value for `ok` is 0), the `code`, `errmsg`, and optionally `errInfo` fields will be +populated with details about the failure. + +If the write succeeded (i.e. the value for `ok` is 1), `n`, `nModified`, and `upserted` will be populated with the +following values based on the type of write: + +| Response Field | Insert | Update | Delete | +| -------------- | ------------------------------------------- | ------------------------------------------------------------------------------------------------------ | ------------------------------------------ | +| `n` | The number of documents that were inserted. | The number of documents that matched the filter. | The number of documents that were deleted. | +| `nModified` | Not present. | The number of documents that were modified. | Not present. | +| `upserted` | Not present. | A document containing the `_id` value for the upserted document. Only present if an upsert took place. | Not present. 
| + +Note that the responses do not contain information about the type of operation that was performed. Drivers may need to +maintain the user's list of write models to infer which type of result should be recorded based on the value of `idx`. + +### Handling Insert Results + +Unlike the other result types, `InsertOneResult` contains an `insertedId` field that is generated driver-side, either by +recording the `_id` field present in the user's insert document or creating and adding one. Drivers MUST only record +these `insertedId` values in a `BulkWriteResult` when a successful response for the insert operation (i.e. +`{ "ok": 1, "n": 1 }`) is received in the results cursor. This ensures that drivers only report an `insertedId` when it +is confirmed that the insert succeeded. + +## Handling Errors + +### Top-Level Errors + +A top-level error is any error that occurs that is not the result of a single write operation failing or a write concern +error. Examples include network errors that occur when communicating with the server, command errors (`{ "ok": 0 }`) +returned from the server, client-side errors, and errors that occur when attempting to perform a `getMore` to retrieve +results from the server. + +When a top-level error is caused by a command error (i.e. an `{ "ok": 0 }` server response), drivers MUST provide access +to the raw server reply in the error returned to the user. + +When a top-level error is encountered and individual results and/or errors have already been observed, drivers MUST +embed the top-level error within a `BulkWriteException` as the `error` field to retain this information. Otherwise, +drivers MAY throw an exception containing only the top-level error. + +Encountering a top-level error MUST halt execution of a bulk write for both ordered and unordered bulk writes. This +means that drivers MUST NOT attempt to retrieve more responses from the cursor or execute any further `bulkWrite` +batches and MUST immediately throw an exception. 
If the results cursor has not been exhausted on the server when a +top-level error occurs, drivers MUST send the `killCursors` command to attempt to close it. The result returned from the +`killCursors` command MAY be ignored. + +### Write Concern Errors + +Write concern errors are recorded in the `writeConcernErrors` field on `BulkWriteException`. When a write concern error +is encountered, it should not terminate execution of the bulk write for either ordered or unordered bulk writes. +However, drivers MUST throw an exception at the end of execution if any write concern errors were observed. + +### Individual Write Errors + +Individual write errors retrieved from the cursor are recorded in the `writeErrors` field on `BulkWriteException`. If an +individual write error is encountered during an ordered bulk write, drivers MUST record the error in `writeErrors` and +immediately throw the exception. Otherwise, drivers MUST continue to iterate the results cursor and execute any further +`bulkWrite` batches. + +## Test Plan + +The majority of tests for `MongoClient.bulkWrite` are written in the +[Unified Test Format](../unified-test-format/unified-test-format.md) and reside in the +[CRUD unified tests directory](../crud/tests/unified/). + +Additional prose tests are specified [here](../crud/tests/README.md). These tests require constructing very large +documents to test batch splitting, which is not feasible in the unified test format at the time of writing this +specification. + +## Future Work + +### Retry `bulkWrite` when `getMore` fails with a retryable error + +When a `getMore` fails with a retryable error when attempting to iterate the results cursor, drivers could retry the +entire `bulkWrite` command to receive a fresh cursor and retry iteration. This work was omitted to minimize the scope of +the initial implementation and testing of the new bulk write API, but may be revisited in the future. 
+ +## Q&A + +### Why are we adding a new bulk write API rather than updating the `MongoCollection.bulkWrite` implementation? + +The new `bulkWrite` command is only available in MongoDB 8.0+, so it cannot function as a drop-in replacement for the +existing bulk write implementation that uses the `insert`, `update`, and `delete` commands. Additionally, because the +new `bulkWrite` command allows operations against multiple collections and databases, `MongoClient` is a more +appropriate place to expose its functionality. + +### Why can't drivers reuse existing bulk write types? + +This specification introduces several types that are similar to existing types used in the `MongoCollection.bulkWrite` +API. Although these types are similar now, they may diverge in the future with the introduction of new options and +features to the `bulkWrite` command. Introducing new types also provides more clarity to users on the existing +differences between the collection-level and client-level bulk write APIs. For example, the `verboseResults` option is +only available for `MongoClient.bulkWrite`. + +### Why are bulk write operation results returned in a cursor? + +Returning results via a cursor rather than an array in the `bulkWrite` response allows full individual results and +errors to be returned without the risk of the response exceeding the maximum BSON object size. Using a cursor also +leaves open the opportunity to add `findAndModify` to the list of supported write operations in the future. + +### Why was the `verboseResults` option introduced, and why is its default `false`? + +The `bulkWrite` command returns top-level summary result counts and, optionally, individual results for each operation. +Compiling the individual results server-side and consuming these results driver-side is less performant than only +recording the summary counts. 
We expect that most users are not interested in the individual results of their operations +and that most users will rely on defaults, so `verboseResults` defaults to `false` to improve performance in the common +case. + +### Why should drivers send `bypassDocumentValidation: false` for `bulkWrite`? + +[DRIVERS-450](https://jira.mongodb.org/browse/DRIVERS-450) introduced a requirement that drivers only send a value for +`bypassDocumentValidation` on write commands if it was specified as true. The original motivation for this change is not +documented. This specification requires that drivers send `bypassDocumentValidation` in the `bulkWrite` command if it is +set by the user in `BulkWriteOptions`, regardless of its value. + +Explicitly defining `bypassDocumentValidation: false` aligns with the server's default to perform schema validation and +thus has no effect. However, checking the value of an option that the user specified and omitting it from the command +document if it matches the server's default creates unnecessary work for drivers. Always sending the user's specified +value also safeguards against the unlikely event that the server changes the default value for +`bypassDocumentValidation` in the future. + +### Why is providing access to the raw server response when a command error occurs required? + +This allows users to access new error fields that the server may add in the future without needing to upgrade their +driver version. See [DRIVERS-2385](https://jira.mongodb.org/browse/DRIVERS-2385) for more details. + +### Why are drivers required to send `nsInfo` as a document sequence? 
+ +`nsInfo` could exceed `maxBsonObjectSize` if a user is doing `maxWriteBatchSize` operations, each operation is on a +unique namespace, and each namespace is near the +[maximum length](https://www.mongodb.com/docs/manual/reference/limits/#mongodb-limit-Restriction-on-Collection-Names) +allowed for namespaces given the values for these limits at the time of writing this specification. Providing `nsInfo` +as a document sequence reduces the likelihood that a driver would need to batch split a user's bulk write in this +scenario. + +### How was the `OP_MSG` overhead allowance determined? + +The Command Batching [Total Message Size](#total-message-size) section uses a 1000 byte overhead allowance to +approximate the number of non-`bulkWrite`-specific bytes contained in an `OP_MSG` sent for a `bulkWrite` batch. This +number was determined by constructing `OP_MSG` messages with various fields attached to the command, including +`startTransaction`, `autocommit`, and `apiVersion`. Additional room was allocated to allow for future additions to the +`OP_MSG` structure or the introduction of new command-agnostic fields. + +Drivers are required to use this value even if they are capable of determining the exact size of the message prior to +batch-splitting to standardize implementations across drivers and simplify batch-splitting testing. + +## **Changelog** + +- 2024-05-17: Update specification status to "Accepted". + +- 2024-05-10: Improve rendered format for JSON-like code blocks. + +- 2024-05-08: Bulk write specification created. 
diff --git a/source/crud/crud.md b/source/crud/crud.md index 1aa3e4dc52..711b77bd94 100644 --- a/source/crud/crud.md +++ b/source/crud/crud.md @@ -1,4 +1,4 @@ -# Driver CRUD API +# CRUD API - Status: Accepted - Minimum Server Version: 2.6 diff --git a/source/crud/tests/README.md b/source/crud/tests/README.md index 1b98d20b37..cfc0067528 100644 --- a/source/crud/tests/README.md +++ b/source/crud/tests/README.md @@ -64,3 +64,615 @@ CommandSucceededEvents. Then, insert an invalid document (e.g. `{x: 1}`) and ass code is `121` (i.e. DocumentValidationFailure), and that its `details` property is accessible. Additionally, assert that a CommandSucceededEvent was observed and that the `writeErrors[0].errInfo` field in the response document matches the WriteError's `details` property. + +### 3. `MongoClient.bulkWrite` batch splits a `writeModels` input with greater than `maxWriteBatchSize` operations + +Test that `MongoClient.bulkWrite` properly handles `writeModels` inputs containing a number of writes greater than +`maxWriteBatchSize`. + +This test must only be run on 8.0+ servers. + +Construct a `MongoClient` (referred to as `client`) with +[command monitoring](../../command-logging-and-monitoring/command-logging-and-monitoring.rst) enabled to observe +CommandStartedEvents. Perform a `hello` command using `client` and record the `maxWriteBatchSize` value contained in the +response. Then, construct the following write model (referred to as `model`): + +```javascript +InsertOne: { + "namespace": "db.coll", + "document": { "a": "b" } +} +``` + +Construct a list of write models (referred to as `models`) with `model` repeated `maxWriteBatchSize + 1` times. Execute +`bulkWrite` on `client` with `models`. Assert that the bulk write succeeds and returns a `BulkWriteResult` with an +`insertedCount` value of `maxWriteBatchSize + 1`. + +Assert that two CommandStartedEvents (referred to as `firstEvent` and `secondEvent`) were observed for the `bulkWrite` +command. 
Assert that the length of `firstEvent.command.ops` is `maxWriteBatchSize`. Assert that the length of +`secondEvent.command.ops` is 1. If the driver exposes `operationId`s in its CommandStartedEvents, assert that +`firstEvent.operationId` is equal to `secondEvent.operationId`. + +### 4. `MongoClient.bulkWrite` batch splits when an `ops` payload exceeds `maxMessageSizeBytes` + +Test that `MongoClient.bulkWrite` properly handles a `writeModels` input which constructs an `ops` array larger than +`maxMessageSizeBytes`. + +This test must only be run on 8.0+ servers. + +Construct a `MongoClient` (referred to as `client`) with +[command monitoring](../../command-logging-and-monitoring/command-logging-and-monitoring.rst) enabled to observe +CommandStartedEvents. Perform a `hello` command using `client` and record the following values from the response: +`maxBsonObjectSize` and `maxMessageSizeBytes`. Then, construct the following document (referred to as `document`): + +```javascript +{ + "a": "b".repeat(maxBsonObjectSize - 500) +} +``` + +Construct the following write model (referred to as `model`): + +```javascript +InsertOne: { + "namespace": "db.coll", + "document": document +} +``` + +Use the following calculation to determine the number of inserts that should be provided to `MongoClient.bulkWrite`: +`maxMessageSizeBytes / maxBsonObjectSize + 1` (referred to as `numModels`). This number ensures that the inserts +provided to `MongoClient.bulkWrite` will require multiple `bulkWrite` commands to be sent to the server. + +Construct a list of write models (referred to as `models`) with `model` repeated `numModels` times. Then execute +`bulkWrite` on `client` with `models`. Assert that the bulk write succeeds and returns a `BulkWriteResult` with an +`insertedCount` value of `numModels`. + +Assert that two CommandStartedEvents (referred to as `firstEvent` and `secondEvent`) were observed. Assert that the +length of `firstEvent.command.ops` is `numModels - 1`. 
Assert that the length of `secondEvent.command.ops` is 1. If the +driver exposes `operationId`s in its CommandStartedEvents, assert that `firstEvent.operationId` is equal to +`secondEvent.operationId`. + +### 5. `MongoClient.bulkWrite` collects `WriteConcernError`s across batches + +Test that `MongoClient.bulkWrite` properly collects and reports `writeConcernError`s returned in separate batches. + +This test must only be run on 8.0+ servers. + +Construct a `MongoClient` (referred to as `client`) with `retryWrites: false` configured and +[command monitoring](../../command-logging-and-monitoring/command-logging-and-monitoring.rst) enabled to observe +CommandStartedEvents. Perform a `hello` command using `client` and record the `maxWriteBatchSize` value contained in the +response. Then, configure the following fail point with `client`: + +```javascript +{ + "configureFailPoint": "failCommand", + "mode": { "times": 2 }, + "data": { + "failCommands": ["bulkWrite"], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } +} +``` + +Construct the following write model (referred to as `model`): + +```javascript +InsertOne: { + "namespace": "db.coll", + "document": { "a": "b" } +} +``` + +Construct a list of write models (referred to as `models`) with `model` repeated `maxWriteBatchSize + 1` times. Execute +`bulkWrite` on `client` with `models`. Assert that the bulk write fails and returns a `BulkWriteError` (referred to as +`error`). + +Assert that `error.writeConcernErrors` has a length of 2. + +Assert that `error.partialResult` is populated. Assert that `error.partialResult.insertedCount` is equal to +`maxWriteBatchSize + 1`. + +Assert that two CommandStartedEvents were observed for the `bulkWrite` command. + +### 6. `MongoClient.bulkWrite` handles individual `WriteError`s across batches + +Test that `MongoClient.bulkWrite` handles individual write errors across batches for ordered and unordered bulk writes. 
+ +This test must only be run on 8.0+ servers. + +Construct a `MongoClient` (referred to as `client`) with +[command monitoring](../../command-logging-and-monitoring/command-logging-and-monitoring.rst) enabled to observe +CommandStartedEvents. Perform a `hello` command using `client` and record the `maxWriteBatchSize` value contained in the +response. + +Construct a `MongoCollection` (referred to as `collection`) with the namespace "db.coll" (referred to as `namespace`). +Drop `collection`. Then, construct the following document (referred to as `document`): + +```javascript +{ + "_id": 1 +} +``` + +Insert `document` into `collection`. + +Create the following write model (referred to as `model`): + +```javascript +InsertOne { + "namespace": namespace, + "document": document +} +``` + +Construct a list of write models (referred to as `models`) with `model` repeated `maxWriteBatchSize + 1` times. + +#### Unordered + +Test that an unordered bulk write collects `WriteError`s across batches. + +Execute `bulkWrite` on `client` with `models` and `ordered` set to false. Assert that the bulk write fails and returns a +`BulkWriteError` (referred to as `unorderedError`). + +Assert that `unorderedError.writeErrors` has a length of `maxWriteBatchSize + 1`. + +Assert that two CommandStartedEvents were observed for the `bulkWrite` command. + +#### Ordered + +Test that an ordered bulk write does not execute further batches when a `WriteError` occurs. + +Execute `bulkWrite` on `client` with `models` and `ordered` set to true. Assert that the bulk write fails and returns a +`BulkWriteError` (referred to as `orderedError`). + +Assert that `orderedError.writeErrors` has a length of 1. + +Assert that one CommandStartedEvent was observed for the `bulkWrite` command. + +### 7. `MongoClient.bulkWrite` handles a cursor requiring a `getMore` + +Test that `MongoClient.bulkWrite` properly iterates the results cursor when `getMore` is required. + +This test must only be run on 8.0+ servers. 
+ +Construct a `MongoClient` (referred to as `client`) with +[command monitoring](../../command-logging-and-monitoring/command-logging-and-monitoring.rst) enabled to observe +CommandStartedEvents. Perform a `hello` command using `client` and record the `maxBsonObjectSize` value from the +response. + +Construct a `MongoCollection` (referred to as `collection`) with the namespace "db.coll" (referred to as `namespace`). +Drop `collection`. Then create the following list of write models (referred to as `models`): + +```javascript +UpdateOne { + "namespace": namespace, + "filter": { "_id": "a".repeat(maxBsonObjectSize / 2) }, + "update": { "$set": { "x": 1 } }, + "upsert": true +}, +UpdateOne { + "namespace": namespace, + "filter": { "_id": "b".repeat(maxBsonObjectSize / 2) }, + "update": { "$set": { "x": 1 } }, + "upsert": true +}, +``` + +Execute `bulkWrite` on `client` with `models` and `verboseResults` set to true. Assert that the bulk write succeeds and +returns a `BulkWriteResult` (referred to as `result`). + +Assert that `result.upsertedCount` is equal to 2. + +Assert that the length of `result.updateResults` is equal to 2. + +Assert that a CommandStartedEvent was observed for the `getMore` command. + +### 8. `MongoClient.bulkWrite` handles a cursor requiring `getMore` within a transaction + +Test that `MongoClient.bulkWrite` executed within a transaction properly iterates the results cursor when `getMore` is +required. + +This test must only be run on 8.0+ servers. This test must not be run against standalone servers. + +Construct a `MongoClient` (referred to as `client`) with +[command monitoring](../../command-logging-and-monitoring/command-logging-and-monitoring.rst) enabled to observe +CommandStartedEvents. Perform a `hello` command using `client` and record the `maxBsonObjectSize` value from the +response. + +Construct a `MongoCollection` (referred to as `collection`) with the namespace "db.coll" (referred to as `namespace`). +Drop `collection`. 
+ +Start a session on `client` (referred to as `session`). Start a transaction on `session`. + +Create the following list of write models (referred to as `models`): + +```javascript +UpdateOne { + "namespace": namespace, + "filter": { "_id": "a".repeat(maxBsonObjectSize / 2) }, + "update": { "$set": { "x": 1 } }, + "upsert": true +}, +UpdateOne { + "namespace": namespace, + "filter": { "_id": "b".repeat(maxBsonObjectSize / 2) }, + "update": { "$set": { "x": 1 } }, + "upsert": true +}, +``` + +Execute `bulkWrite` on `client` with `models`, `session`, and `verboseResults` set to true. Assert that the bulk write +succeeds and returns a `BulkWriteResult` (referred to as `result`). + +Assert that `result.upsertedCount` is equal to 2. + +Assert that the length of `result.updateResults` is equal to 2. + +Assert that a CommandStartedEvent was observed for the `getMore` command. + +### 9. `MongoClient.bulkWrite` handles a `getMore` error + +Test that `MongoClient.bulkWrite` properly handles a failure that occurs when attempting a `getMore`. + +This test must only be run on 8.0+ servers. + +Construct a `MongoClient` (referred to as `client`) with +[command monitoring](../../command-logging-and-monitoring/command-logging-and-monitoring.rst) enabled to observe +CommandStartedEvents. Perform a `hello` command using `client` and record the `maxBsonObjectSize` value from the +response. Then, configure the following fail point with `client`: + +```javascript +{ + "configureFailPoint": "failCommand", + "mode": { "times": 1 }, + "data": { + "failCommands": ["getMore"], + "errorCode": 8 + } +} +``` + +Construct a `MongoCollection` (referred to as `collection`) with the namespace "db.coll" (referred to as `namespace`). +Drop `collection`. 
Then create the following list of write models (referred to as `models`): + +```javascript +UpdateOne { + "namespace": namespace, + "filter": { "_id": "a".repeat(maxBsonObjectSize / 2) }, + "update": { "$set": { "x": 1 } }, + "upsert": true +}, +UpdateOne { + "namespace": namespace, + "filter": { "_id": "b".repeat(maxBsonObjectSize / 2) }, + "update": { "$set": { "x": 1 } }, + "upsert": true +}, +``` + +Execute `bulkWrite` on `client` with `models` and `verboseResults` set to true. Assert that the bulk write fails and +returns a `BulkWriteError` (referred to as `bulkWriteError`). + +Assert that `bulkWriteError.error` is populated with an error (referred to as `topLevelError`). Assert that +`topLevelError.errorCode` is equal to 8. + +Assert that `bulkWriteError.partialResult` is populated with a result (referred to as `partialResult`). Assert that +`partialResult.upsertedCount` is equal to 2. Assert that the length of `partialResult.updateResults` is equal to 1. + +Assert that a CommandStartedEvent was observed for the `getMore` command. + +Assert that a CommandStartedEvent was observed for the `killCursors` command. + +### 10. `MongoClient.bulkWrite` returns error for unacknowledged too-large insert + +This test must only be run on 8.0+ servers. + +Construct a `MongoClient` (referred to as `client`). + +Perform a `hello` command using `client` and record the following values from the response: `maxBsonObjectSize`. + +Then, construct the following document (referred to as `document`): + +```javascript +{ + "a": "b".repeat(maxBsonObjectSize) +} +``` + +#### With insert + +Construct the following write model (referred to as `model`): + +```javascript +InsertOne: { + "namespace": "db.coll", + "document": document +} +``` + +Construct a list of write models (referred to as `models`) with the one `model`. + +Call `MongoClient.bulkWrite` with `models` and `BulkWriteOptions.writeConcern` set to an unacknowledged write concern. + +Expect a client-side error due to the size. 
+ +#### With replace + +Construct the following write model (referred to as `model`): + +```javascript +ReplaceOne: { + "namespace": "db.coll", + "filter": {}, + "replacement": document +} +``` + +Construct a list of write models (referred to as `models`) with the one `model`. + +Call `MongoClient.bulkWrite` with `models` and `BulkWriteOptions.writeConcern` set to an unacknowledged write concern. + +Expect a client-side error due to the size. + +### 11. `MongoClient.bulkWrite` batch splits when the addition of a new namespace exceeds the maximum message size + +Test that `MongoClient.bulkWrite` batch splits a bulk write when the addition of a new namespace to `nsInfo` causes the +size of the message to exceed `maxMessageSizeBytes - 1000`. + +This test must only be run on 8.0+ servers. + +Repeat the following setup for each test case: + +### Setup + +Construct a `MongoClient` (referred to as `client`) with +[command monitoring](../../command-logging-and-monitoring/command-logging-and-monitoring.rst) enabled to observe +CommandStartedEvents. Perform a `hello` command using `client` and record the following values from the response: +`maxBsonObjectSize` and `maxMessageSizeBytes`. + +Calculate the following values: + +``` +opsBytes = maxMessageSizeBytes - 1122 +numModels = opsBytes / maxBsonObjectSize +remainderBytes = opsBytes % maxBsonObjectSize +``` + +Construct the following write model (referred to as `firstModel`): + +```javascript +InsertOne { + "namespace": "db.coll", + "document": { "a": "b".repeat(maxBsonObjectSize - 57) } +} +``` + +Create a list of write models (referred to as `models`) with `firstModel` repeated `numModels` times. 
+ +If `remainderBytes` is greater than or equal to 217, add 1 to `numModels` and append the following write model to +`models`: + +```javascript +InsertOne { + "namespace": "db.coll", + "document": { "a": "b".repeat(remainderBytes - 57) } +} +``` + +Then perform the following two tests: + +#### Case 1: No batch-splitting required + +Create the following write model (referred to as `sameNamespaceModel`): + +```javascript +InsertOne { + "namespace": "db.coll", + "document": { "a": "b" } +} +``` + +Append `sameNamespaceModel` to `models`. + +Execute `bulkWrite` on `client` with `models`. Assert that the bulk write succeeds and returns a `BulkWriteResult` +(referred to as `result`). + +Assert that `result.insertedCount` is equal to `numModels + 1`. + +Assert that one CommandStartedEvent was observed for the `bulkWrite` command (referred to as `event`). + +Assert that the length of `event.command.ops` is `numModels + 1`. Assert that the length of `event.command.nsInfo` is 1. +Assert that the namespace contained in `event.command.nsInfo` is "db.coll". + +#### Case 2: Batch-splitting required + +Construct the following namespace (referred to as `namespace`): + +``` +"db." + "c".repeat(200) +``` + +Create the following write model (referred to as `newNamespaceModel`): + +```javascript +InsertOne { + "namespace": namespace, + "document": { "a": "b" } +} +``` + +Append `newNamespaceModel` to `models`. + +Execute `bulkWrite` on `client` with `models`. Assert that the bulk write succeeds and returns a `BulkWriteResult` +(referred to as `result`). + +Assert that `result.insertedCount` is equal to `numModels + 1`. + +Assert that two CommandStartedEvents were observed for the `bulkWrite` command (referred to as `firstEvent` and +`secondEvent`). + +Assert that the length of `firstEvent.command.ops` is equal to `numModels`. Assert that the length of +`firstEvent.command.nsInfo` is equal to 1. Assert that the namespace contained in `firstEvent.command.nsInfo` is +"db.coll". 
+ +Assert that the length of `secondEvent.command.ops` is equal to 1. Assert that the length of +`secondEvent.command.nsInfo` is equal to 1. Assert that the namespace contained in `secondEvent.command.nsInfo` is +`namespace`. + +#### Details on size calculations + +This information is not needed to implement this prose test, but is documented for future reference. This test is +designed to work if `maxBsonObjectSize` or `maxMessageSizeBytes` changes, but will need to be updated if a required +field is added to the `bulkWrite` command or the `insert` operation document, or if the overhead `OP_MSG` allowance is +changed in the bulk write specification. + +The command document for the `bulkWrite` has the following structure and size: + +```javascript +{ + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true +} + +Size: 43 bytes +``` + +Each write model will create an `ops` document with the following structure and size: + +```javascript +{ + "insert": <0 | 1>, + "document": { + "_id": , + "a": + } +} + +Size: 57 bytes + +``` + +The `ops` document for both `newNamespaceModel` and `sameNamespaceModel` has a string with one character, so it is a +total of 58 bytes. + +The models using the "db.coll" namespace will create one `nsInfo` document with the following structure and size: + +```javascript +{ + "ns": "db.coll" +} + +Size: 21 bytes +``` + +`newNamespaceModel` will create an `nsInfo` document with the following structure and size: + +```javascript +{ + "ns": "db." +} + +Size: 217 bytes +``` + +We need to fill up the rest of the message with bytes such that another `ops` document will fit, but another `nsInfo` +entry will not. 
The following calculations are used: + +``` +# 1000 is the OP_MSG overhead required in the spec +maxBulkWriteBytes = maxMessageSizeBytes - 1000 + +# bulkWrite command + first namespace entry +existingMessageBytes = 43 + 21 + +# Space to fit the last model's ops entry +lastModelBytes = 58 + +remainingBulkWriteBytes = maxBulkWriteBytes - existingMessageBytes - lastModelBytes + +# With the actual numbers plugged in +remainingBulkWriteBytes = maxMessageSizeBytes - 1122 +``` + +### 12. `MongoClient.bulkWrite` returns an error if no operations can be added to `ops` + +Test that `MongoClient.bulkWrite` returns an error if an operation provided exceeds `maxMessageSizeBytes` such that an +empty `ops` payload would be sent. + +This test must only be run on 8.0+ servers. This test may be skipped by drivers that are not able to construct +arbitrarily large documents. + +Construct a `MongoClient` (referred to as `client`). Perform a `hello` command using `client` and record the +`maxMessageSizeBytes` value contained in the response. + +#### Case 1: `document` too large + +Construct the following write model (referred to as `largeDocumentModel`): + +```javascript +InsertOne { + "namespace": "db.coll", + "document": { "a": "b".repeat(maxMessageSizeBytes) } +} +``` + +Execute `bulkWrite` on `client` with `largeDocumentModel`. Assert that an error (referred to as `error`) is returned. +Assert that `error` is a client error. + +#### Case 2: `namespace` too large + +Construct the following namespace (referred to as `namespace`): + +``` +"db." + "c".repeat(maxMessageSizeBytes) +``` + +Construct the following write model (referred to as `largeNamespaceModel`): + +```javascript +InsertOne { + "namespace": namespace, + "document": { "a": "b" } +} +``` + +Execute `bulkWrite` on `client` with `largeNamespaceModel`. Assert that an error (referred to as `error`) is returned. +Assert that `error` is a client error. + +### 13. 
`MongoClient.bulkWrite` returns an error if auto-encryption is configured + +This test is expected to be removed when [DRIVERS-2888](https://jira.mongodb.org/browse/DRIVERS-2888) is resolved. + +Test that `MongoClient.bulkWrite` returns an error if the client has auto-encryption configured. + +This test must only be run on 8.0+ servers. + +Construct a `MongoClient` (referred to as `client`) configured with the following `AutoEncryptionOpts`: + +```javascript +AutoEncryptionOpts { + "keyVaultNamespace": "db.coll", + "kmsProviders": { + "aws": { + "accessKeyId": "foo", + "secretAccessKey": "bar" + } + } +} +``` + +Construct the following write model (referred to as `model`): + +```javascript +InsertOne { + "namespace": "db.coll", + "document": { "a": "b" } +} +``` + +Execute `bulkWrite` on `client` with `model`. Assert that an error (referred to as `error`) is returned. Assert that +`error` is a client error containing the message: "bulkWrite does not currently support automatic encryption". 
diff --git a/source/crud/tests/unified/client-bulkWrite-delete-options.json b/source/crud/tests/unified/client-bulkWrite-delete-options.json new file mode 100644 index 0000000000..5bdf2b124a --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-delete-options.json @@ -0,0 +1,267 @@ +{ + "description": "client bulkWrite delete options", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulk write delete with collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": 
"bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + }, + { + "description": "client bulk write delete with hint", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "hint": "_id_", + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "multi": true + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + } + ] +} diff --git a/source/crud/tests/unified/client-bulkWrite-delete-options.yml b/source/crud/tests/unified/client-bulkWrite-delete-options.yml new file 
mode 100644 index 0000000000..db8b9f46d7 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-delete-options.yml @@ -0,0 +1,136 @@ +description: "client bulkWrite delete options" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + collation: &collation { "locale": "simple" } + hint: &hint _id_ + +tests: + - description: "client bulk write delete with collation" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + collation: *collation + - deleteMany: + namespace: *namespace + filter: { _id: { $gt: 1 } } + collation: *collation + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 3 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + 1: + deletedCount: 2 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - delete: 0 + filter: { _id: 1 } + collation: *collation + multi: false + - delete: 0 + filter: { _id: { $gt: 1 } } + collation: *collation + multi: true + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: [] + - description: "client bulk write delete with hint" + operations: + - object: *client0 + name: 
clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + hint: *hint + - deleteMany: + namespace: *namespace + filter: { _id: { $gt: 1 } } + hint: *hint + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 3 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + 1: + deletedCount: 2 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - delete: 0 + filter: { _id: 1 } + hint: *hint + multi: false + - delete: 0 + filter: { _id: { $gt: 1 } } + hint: *hint + multi: true + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: [] diff --git a/source/crud/tests/unified/client-bulkWrite-errorResponse.json b/source/crud/tests/unified/client-bulkWrite-errorResponse.json new file mode 100644 index 0000000000..edf2339d8a --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-errorResponse.json @@ -0,0 +1,68 @@ +{ + "description": "client bulkWrite errorResponse", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite operations support errorResponse assertions", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1 + 
} + } + } + ] + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/source/crud/tests/unified/client-bulkWrite-errorResponse.yml b/source/crud/tests/unified/client-bulkWrite-errorResponse.yml new file mode 100644 index 0000000000..45e53171ec --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-errorResponse.yml @@ -0,0 +1,37 @@ +description: "client bulkWrite errorResponse" +schemaVersion: "1.12" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + useMultipleMongoses: false # Avoid setting fail points with multiple mongoses + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite operations support errorResponse assertions" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ bulkWrite ] + errorCode: &errorCode 8 # UnknownError + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1 } + expectError: + errorCode: *errorCode + errorResponse: + code: *errorCode diff --git a/source/crud/tests/unified/client-bulkWrite-errors.json b/source/crud/tests/unified/client-bulkWrite-errors.json new file mode 100644 index 0000000000..9f17f85331 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-errors.json @@ -0,0 +1,454 @@ +{ + "description": "client bulkWrite errors", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryWrites": false + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + 
"database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "writeConcernErrorCode": 91, + "writeConcernErrorMessage": "Replication is being shut down", + "undefinedVarCode": 17276 + }, + "tests": [ + { + "description": "an individual operation fails during an ordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "an individual operation fails during an unordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + 
"_id": 3 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 2, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "2": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "detailed results are omitted from error when verboseResults is false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a top-level failure occurs during a bulkWrite", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", 
+ "document": { + "x": 1 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "errorCode": 8 + } + } + ] + }, + { + "description": "a bulk write with only errors does not report a partial result", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": {} + }, + "writeErrors": { + "0": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a write concern error occurs during a bulkWrite", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 10 + } + }, + "updateResults": {}, + "deleteResults": {} + }, + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ] + }, + { + "description": "an empty list of write models is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [], + "verboseResults": true + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/source/crud/tests/unified/client-bulkWrite-errors.yml 
b/source/crud/tests/unified/client-bulkWrite-errors.yml new file mode 100644 index 0000000000..3a420f1429 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-errors.yml @@ -0,0 +1,240 @@ +description: "client bulkWrite errors" +schemaVersion: "1.21" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + uriOptions: + retryWrites: false + useMultipleMongoses: false # Target a single mongos with failpoint + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + writeConcernErrorCode: &writeConcernErrorCode 91 + writeConcernErrorMessage: &writeConcernErrorMessage "Replication is being shut down" + undefinedVarCode: &undefinedVarCode 17276 # Use of an undefined variable + +tests: + - description: "an individual operation fails during an ordered bulkWrite" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectError: + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 1 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + writeErrors: + 1: + code: *undefinedVarCode + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: 
"an individual operation fails during an unordered bulkWrite" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + ordered: false + expectError: + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 2 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + 2: + deletedCount: 1 + writeErrors: + 1: + code: *undefinedVarCode + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 2, x: 22 } + - description: "detailed results are omitted from error when verboseResults is false" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: false + expectError: + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 1 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + writeErrors: + 1: + code: *undefinedVarCode + - description: "a top-level failure occurs during a bulkWrite" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: + - bulkWrite + errorCode: 8 # UnknownError + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { x: 1 } + verboseResults: true + 
expectError: + errorCode: 8 + - description: "a bulk write with only errors does not report a partial result" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + verboseResults: true + expectError: + expectResult: + $$unsetOrMatches: {} # Empty or nonexistent result when no successful writes occurred + writeErrors: + 0: + code: *undefinedVarCode + - description: "a write concern error occurs during a bulkWrite" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: + - bulkWrite + writeConcernError: + code: *writeConcernErrorCode + errmsg: *writeConcernErrorMessage + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 10 } + verboseResults: true + expectError: + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 10 + updateResults: {} + deleteResults: {} + writeConcernErrors: + - code: *writeConcernErrorCode + message: *writeConcernErrorMessage + - description: "an empty list of write models is a client-side error" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: [] + verboseResults: true + expectError: + isClientError: true diff --git a/source/crud/tests/unified/client-bulkWrite-mixed-namespaces.json b/source/crud/tests/unified/client-bulkWrite-mixed-namespaces.json new file mode 100644 index 0000000000..f90755dc85 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-mixed-namespaces.json @@ -0,0 +1,314 @@ +{ + "description": "client bulkWrite with mixed namespaces", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + 
"client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "db1" + } + }, + { + "collection": { + "id": "collection2", + "database": "database1", + "collectionName": "coll2" + } + } + ], + "initialData": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "_yamlAnchors": { + "db0Coll0Namespace": "db0.coll0", + "db0Coll1Namespace": "db0.coll1", + "db1Coll2Namespace": "db1.coll2" + }, + "tests": [ + { + "description": "client bulkWrite with mixed namespaces", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 1 + } + } + }, + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 2 + } + } + }, + { + "updateOne": { + "namespace": "db0.coll1", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 3 + } + } + }, + { + "deleteOne": { + "namespace": "db0.coll1", + "filter": { + "_id": 2 + } + } + }, + { + "replaceOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 45 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 
2, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 2, + "insertResults": { + "0": { + "insertedId": 1 + }, + "1": { + "insertedId": 2 + } + }, + "updateResults": { + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "5": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + }, + "4": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1 + } + }, + { + "insert": 0, + "document": { + "_id": 2 + } + }, + { + "update": 1, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 2, + "filter": { + "_id": 3 + }, + "multi": false + }, + { + "delete": 1, + "filter": { + "_id": 2 + }, + "multi": false + }, + { + "update": 2, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 45 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "db0.coll0" + }, + { + "ns": "db0.coll1" + }, + { + "ns": "db1.coll2" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 4, + "x": 45 + } + ] + } + ] + } + ] +} diff --git a/source/crud/tests/unified/client-bulkWrite-mixed-namespaces.yml b/source/crud/tests/unified/client-bulkWrite-mixed-namespaces.yml new file mode 100644 index 0000000000..4e4cb01e16 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-mixed-namespaces.yml @@ -0,0 +1,146 @@ +description: "client bulkWrite with mixed namespaces" +schemaVersion: 
"1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name db0 + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + - collection: + id: &collection1 collection1 + database: *database0 + collectionName: &collection1Name coll1 + - database: + id: &database1 database1 + client: *client0 + databaseName: &database1Name db1 + - collection: + id: &collection2 collection2 + database: *database1 + collectionName: &collection2Name coll2 + +initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: [] + - databaseName: *database0Name + collectionName: *collection1Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - databaseName: *database1Name + collectionName: *collection2Name + documents: + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + +_yamlAnchors: + db0Coll0Namespace: &db0Coll0Namespace "db0.coll0" + db0Coll1Namespace: &db0Coll1Namespace "db0.coll1" + db1Coll2Namespace: &db1Coll2Namespace "db1.coll2" + +tests: + - description: "client bulkWrite with mixed namespaces" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *db0Coll0Namespace + document: { _id: 1 } + - insertOne: + namespace: *db0Coll0Namespace + document: { _id: 2 } + - updateOne: + namespace: *db0Coll1Namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - deleteOne: + namespace: *db1Coll2Namespace + filter: { _id: 3 } + - deleteOne: + namespace: *db0Coll1Namespace + filter: { _id: 2 } + - replaceOne: + namespace: *db1Coll2Namespace + filter: { _id: 4 } + replacement: { x: 45 } + verboseResults: true + expectResult: + insertedCount: 2 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 2 + insertResults: + 0: + insertedId: 1 + 1: + 
insertedId: 2 + updateResults: + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 5: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 3: + deletedCount: 1 + 4: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + bulkWrite: 1 + ops: + - insert: 0 + document: { _id: 1 } + - insert: 0 + document: { _id: 2 } + - update: 1 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - delete: 2 + filter: { _id: 3 } + multi: false + - delete: 1 + filter: { _id: 2 } + multi: false + - update: 2 + filter: { _id: 4 } + updateMods: { x: 45 } + multi: false + nsInfo: + - ns: *db0Coll0Namespace + - ns: *db0Coll1Namespace + - ns: *db1Coll2Namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1 } + - { _id: 2 } + - databaseName: *database0Name + collectionName: *collection1Name + documents: + - { _id: 1, x: 12 } + - databaseName: *database1Name + collectionName: *collection2Name + documents: + - { _id: 4, x: 45 } diff --git a/source/crud/tests/unified/client-bulkWrite-options.json b/source/crud/tests/unified/client-bulkWrite-options.json new file mode 100644 index 0000000000..a1e6af3bf3 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-options.json @@ -0,0 +1,715 @@ +{ + "description": "client bulkWrite top-level options", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "writeConcernClient", + "uriOptions": { + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + 
"initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "comment": { + "bulk": "write" + }, + "let": { + "id1": 1, + "id2": 2 + }, + "writeConcern": { + "w": "majority" + } + }, + "tests": [ + { + "description": "client bulkWrite comment", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "comment": { + "bulk": "write" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "comment": { + "bulk": "write" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": true, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 
0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite let", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "let": { + "id1": 1, + "id2": 2 + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 1, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "1": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "let": { + "id1": 1, + "id2": 2 + }, + "ops": [ + { + "update": 0, + "filter": { + "$expr": { + "$eq": [ 
+ "$_id", + "$$id1" + ] + } + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation: false is sent", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": false, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + 
"verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite inherits writeConcern from client", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": 1 + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern option overrides client writeConcern", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, 
+ "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/source/crud/tests/unified/client-bulkWrite-options.yml b/source/crud/tests/unified/client-bulkWrite-options.yml new file mode 100644 index 0000000000..fdcf788799 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-options.yml @@ -0,0 +1,350 @@ +description: "client bulkWrite top-level options" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - client: + id: &writeConcernClient writeConcernClient + uriOptions: + &clientWriteConcern { w: 1 } + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + comment: &comment { bulk: "write" } + let: &let { id1: 1, id2: 2 } + writeConcern: &majorityWriteConcern { w: "majority" } + +tests: + - description: "client 
bulkWrite comment" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + comment: *comment + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + comment: *comment + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "client bulkWrite bypassDocumentValidation" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + bypassDocumentValidation: true + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + bypassDocumentValidation: true + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "client bulkWrite let" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id1" ] + update: + $inc: { x: 1 } + - deleteOne: + 
namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] + let: *let + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 1 + modifiedCount: 1 + deletedCount: 1 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 1: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + let: *let + ops: + - update: 0 + filter: + $expr: + $eq: [ "$_id", "$$id1" ] + updateMods: { $inc: { x: 1 } } + multi: false + - delete: 0 + filter: + $expr: + $eq: [ "$_id", "$$id2" ] + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, x: 12 } + - description: "client bulkWrite bypassDocumentValidation: false is sent" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + bypassDocumentValidation: false + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + bypassDocumentValidation: false + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "client bulkWrite writeConcern" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { 
_id: 3, x: 33 } + writeConcern: *majorityWriteConcern + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + writeConcern: *majorityWriteConcern + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + - description: "client bulkWrite inherits writeConcern from client" + operations: + - object: *writeConcernClient + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *writeConcernClient + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + writeConcern: { w: 1 } + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + - description: "client bulkWrite writeConcern option overrides client writeConcern" + operations: + - object: *writeConcernClient + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + writeConcern: *majorityWriteConcern + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *writeConcernClient + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + 
writeConcern: *majorityWriteConcern + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace diff --git a/source/crud/tests/unified/client-bulkWrite-ordered.json b/source/crud/tests/unified/client-bulkWrite-ordered.json new file mode 100644 index 0000000000..a55d6619b5 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-ordered.json @@ -0,0 +1,290 @@ +{ + "description": "client bulkWrite with ordered option", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with ordered: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + 
] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite with ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + 
"errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/source/crud/tests/unified/client-bulkWrite-ordered.yml b/source/crud/tests/unified/client-bulkWrite-ordered.yml new file mode 100644 index 0000000000..dc56dcb860 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-ordered.yml @@ -0,0 +1,152 @@ +description: "client bulkWrite with ordered option" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: [] + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite with ordered: false" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1, x: 11 } + verboseResults: true + ordered: false + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 1 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: false + ops: + - insert: 0 + document: { _id: 1, x: 11 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + 
documents: + - { _id: 1, x: 11 } + - description: "client bulkWrite with ordered: true" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1, x: 11 } + verboseResults: true + ordered: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 1 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 1, x: 11 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - description: "client bulkWrite defaults to ordered: true" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1, x: 11 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 1 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 1, x: 11 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } diff --git a/source/crud/tests/unified/client-bulkWrite-results.json b/source/crud/tests/unified/client-bulkWrite-results.json new file mode 100644 index 0000000000..97a9e50b21 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-results.json @@ -0,0 +1,832 @@ +{ + "description": "client bulkWrite results", + "schemaVersion": "1.1", + 
"runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with verboseResults: true returns detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + 
"insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with verboseResults: false omits detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": 
"crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { 
+ "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to verboseResults: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + 
"errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + } + ] +} diff --git a/source/crud/tests/unified/client-bulkWrite-results.yml b/source/crud/tests/unified/client-bulkWrite-results.yml new file mode 100644 index 0000000000..eb001bbb42 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-results.yml @@ -0,0 +1,311 @@ +description: "client bulkWrite results" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 5, x: 55 } + - { 
_id: 6, x: 66 } + - { _id: 7, x: 77 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite with verboseResults: true returns detailed results" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + 0: + insertedId: 8 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + 3: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 4 + deleteResults: + 4: + deletedCount: 1 + 5: + deletedCount: 2 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + 
outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } + - description: "client bulkWrite with verboseResults: false omits detailed results" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + verboseResults: false + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { 
_id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } + - description: "client bulkWrite defaults to verboseResults: false" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } diff --git a/source/crud/tests/unified/client-bulkWrite-update-options.json 
b/source/crud/tests/unified/client-bulkWrite-update-options.json new file mode 100644 index 0000000000..93a2774e5f --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-update-options.json @@ -0,0 +1,948 @@ +{ + "description": "client bulkWrite update options", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulkWrite update with arrayFilters", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 0, + "insertResults": {}, + 
"updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 4, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + 
"replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": 
"client bulkWrite update with hint", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_", + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_", + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 
2, + 6 + ] + }, + "hint": "_id_", + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with upsert", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 6 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 5 + }, + "1": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 6 + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 5 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 6 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ 
+ { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 5, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 6, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + } + ] +} diff --git a/source/crud/tests/unified/client-bulkWrite-update-options.yml b/source/crud/tests/unified/client-bulkWrite-update-options.yml new file mode 100644 index 0000000000..fe188a490c --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-update-options.yml @@ -0,0 +1,337 @@ +description: "client bulkWrite update options" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, array: [ 1, 2, 3 ] } + - { _id: 2, array: [ 1, 2, 3 ] } + - { _id: 3, array: [ 1, 2, 3 ] } + - { _id: 4, array: [ 1, 2, 3 ] } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + collation: &collation { "locale": "simple" } + hint: &hint _id_ + +tests: + - description: "client bulkWrite update with arrayFilters" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $set: + array.$[i]: 4 + arrayFilters: [ i: { $gte: 2 } ] + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: + $set: + array.$[i]: 5 + arrayFilters: [ i: { $gte: 2 } ] + verboseResults: true + expectResult: + 
insertedCount: 0 + upsertedCount: 0 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 1: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $set: + array.$[i]: 4 + arrayFilters: [ i: { $gte: 2 } ] + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: + $set: + array.$[i]: 5 + arrayFilters: [ i: { $gte: 2 } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 4, 4 ] } + - { _id: 2, array: [ 1, 5, 5 ] } + - { _id: 3, array: [ 1, 5, 5 ] } + - { _id: 4, array: [ 1, 2, 3 ] } + - description: "client bulkWrite update with collation" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $set: { array: [ 1, 2, 4 ] } } + collation: *collation + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $set: { array: [ 1, 2, 5 ] } } + collation: *collation + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { array: [ 1, 2, 6 ] } + collation: *collation + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 4 + modifiedCount: 4 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 1: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 
deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: { $set: { array: [ 1, 2, 4 ] } } + collation: *collation + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $set: { array: [ 1, 2, 5 ] } } + collation: *collation + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { array: [ 1, 2, 6 ] } + collation: *collation + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 2, 4 ] } + - { _id: 2, array: [ 1, 2, 5 ] } + - { _id: 3, array: [ 1, 2, 5 ] } + - { _id: 4, array: [ 1, 2, 6 ] } + - description: "client bulkWrite update with hint" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $set: { array: [ 1, 2, 4 ] } } + hint: *hint + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $set: { array: [ 1, 2, 5 ] } } + hint: *hint + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { array: [ 1, 2, 6 ] } + hint: *hint + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 4 + modifiedCount: 4 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 1: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - 
update: 0 + filter: { _id: 1 } + updateMods: { $set: { array: [ 1, 2, 4 ] } } + hint: *hint + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $set: { array: [ 1, 2, 5 ] } } + hint: *hint + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { array: [ 1, 2, 6 ] } + hint: *hint + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 2, 4 ] } + - { _id: 2, array: [ 1, 2, 5 ] } + - { _id: 3, array: [ 1, 2, 5 ] } + - { _id: 4, array: [ 1, 2, 6 ] } + - description: "client bulkWrite update with upsert" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 5 } + update: { $set: { array: [ 1, 2, 4 ] } } + upsert: true + - replaceOne: + namespace: *namespace + filter: { _id: 6 } + replacement: { array: [ 1, 2, 6 ] } + upsert: true + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 2 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 5 + 1: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 6 + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 5 } + updateMods: { $set: { array: [ 1, 2, 4 ] } } + upsert: true + multi: false + - update: 0 + filter: { _id: 6 } + updateMods: { array: [ 1, 2, 6 ] } + upsert: true + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 2, 3 ] } + - { _id: 2, array: [ 1, 2, 3 ] } + - { _id: 3, array: [ 1, 2, 3 ] } + - { _id: 4, array: [ 1, 2, 3 ] } + - { _id: 5, array: [ 1, 2, 4 ] } + - { 
_id: 6, array: [ 1, 2, 6 ] } diff --git a/source/crud/tests/unified/client-bulkWrite-update-pipeline.json b/source/crud/tests/unified/client-bulkWrite-update-pipeline.json new file mode 100644 index 0000000000..57b6c9c1ba --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-update-pipeline.json @@ -0,0 +1,257 @@ +{ + "description": "client bulkWrite update pipeline", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite updateOne with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + 
}, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": {}, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": {}, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/source/crud/tests/unified/client-bulkWrite-update-pipeline.yml b/source/crud/tests/unified/client-bulkWrite-update-pipeline.yml new file mode 100644 index 0000000000..fe0e29a508 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-update-pipeline.yml @@ -0,0 +1,132 @@ +description: "client bulkWrite update pipeline" 
+schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - {_id: 1, x: 1} + - {_id: 2, x: 2} + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite updateOne with pipeline" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + - $addFields: + foo: 1 + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 1 + modifiedCount: 1 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { "$$exists": false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + - $addFields: + foo: 1 + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - {_id: 1, x: 1, foo: 1} + - {_id: 2, x: 2 } + + - description: "client bulkWrite updateMany with pipeline" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: *namespace + filter: {} + update: + - $addFields: + foo: 1 + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { 
"$$exists": false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { } + updateMods: + - $addFields: + foo: 1 + multi: true + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - {_id: 1, x: 1, foo: 1} + - {_id: 2, x: 2, foo: 1} diff --git a/source/crud/tests/unified/client-bulkWrite-update-validation.json b/source/crud/tests/unified/client-bulkWrite-update-validation.json new file mode 100644 index 0000000000..617e711338 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-update-validation.json @@ -0,0 +1,216 @@ +{ + "description": "client-bulkWrite-update-validation", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite replaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + 
"databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateOne requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/source/crud/tests/unified/client-bulkWrite-update-validation.yml b/source/crud/tests/unified/client-bulkWrite-update-validation.yml new file mode 100644 index 0000000000..478554c322 --- /dev/null +++ b/source/crud/tests/unified/client-bulkWrite-update-validation.yml @@ -0,0 +1,79 @@ +description: "client-bulkWrite-update-validation" + +schemaVersion: "1.1" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + 
databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: &initialData + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite replaceOne prohibits atomic modifiers" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - replaceOne: + namespace: *namespace + filter: { _id: 1 } + replacement: { $set: { x: 22 } } + expectError: + isClientError: true + expectEvents: + - client: *client0 + events: [] + outcome: *initialData + + - description: "client bulkWrite updateOne requires atomic modifiers" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { x: 22 } + expectError: + isClientError: true + expectEvents: + - client: *client0 + events: [] + outcome: *initialData + + - description: "client bulkWrite updateMany requires atomic modifiers" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - updateMany: + namespace: *namespace + filter: { _id: { $gt: 1 } } + update: { x: 44 } + expectError: + isClientError: true + expectEvents: + - client: *client0 + events: [] + outcome: *initialData diff --git a/source/driver-mantras.md b/source/driver-mantras.md new file mode 100644 index 0000000000..bf52c62f0a --- /dev/null +++ b/source/driver-mantras.md @@ -0,0 +1,69 @@ +# Driver Mantras + +When developing specifications -- and the drivers themselves -- we follow the following principles: + +### Strive to be idiomatic, but favor consistency + +Drivers attempt to provide the easiest way to work with MongoDB in a given language ecosystem, while specifications +attempt to provide a consistent behavior and experience across 
all languages. Drivers should strive to be as idiomatic +as possible while meeting the specification and staying true to the original intent. + +### No Knobs + +Too many choices stress out users. Whenever possible, we aim to minimize the number of configuration options exposed to +users. In particular, if a typical user would have no idea how to choose a correct value, we pick a good default instead +of adding a knob. + +### Topology agnostic + +Users test and deploy against different topologies or might scale up from replica sets to sharded clusters. Applications +should never need to use the driver differently based on topology type. + +### Where possible, depend on server to return errors + +The features available to users depend on a server's version, topology, storage engine and configuration. So that +drivers don't need to code and test all possible variations, and to maximize forward compatibility, always let users +attempt operations and let the server error when it can't comply. Exceptions should be rare: for cases where the server +might not error and correctness is at stake. + +### Minimize administrative helpers + +Administrative helpers are methods for admin tasks, like user creation. These are rarely used and have maintenance costs +as the server changes the administrative API. Don't create administrative helpers; let users rely on "RunCommand" for +administrative commands. + +### Check wire version, not server version + +When determining server capabilities within the driver, rely only on the maxWireVersion in the hello response, not on +the X.Y.Z server version. An exception is testing server development releases, as the server bumps wire version early +and then continues to add features until the GA. + +### When in doubt, use "MUST" not "SHOULD" in specs + +Specs guide our work. 
While there are occasionally valid technical reasons for drivers to differ in their behavior, +avoid encouraging it with a wishy-washy "SHOULD" instead of a more assertive "MUST". + +### Defy augury + +While we have some idea of what the server will do in the future, don't design features with those expectations in mind. +Design and implement based on what is expected in the next release. + +Case Study: In designing OP_MSG, we held off on designing support for Document Sequences in Replies in drivers until the +server would support it. We subsequently decided not to implement that feature in the server. + +### The best way to see what the server does is to test it + +For any unusual case, relying on documentation or anecdote to anticipate the server's behavior in different +versions/topologies/etc. is error-prone. The best way to check the server's behavior is to use a driver or the shell and +test it directly. + +### Drivers follow semantic versioning + +Drivers should follow X.Y.Z versioning, where breaking API changes require a bump to X. See +[semver.org](https://semver.org/) for more. + +### Backward breaking behavior changes and semver + +Backward breaking behavior changes can be more dangerous and disruptive than backward breaking API changes. When +thinking about the implications of a behavior change, ask yourself what could happen if a user upgraded your library +without carefully reading the changelog and/or adequately testing the change. 
diff --git a/source/etc/generate-handshakeError-tests.py b/source/etc/generate-handshakeError-tests.py index 0fc3821c8d..a07a9df833 100644 --- a/source/etc/generate-handshakeError-tests.py +++ b/source/etc/generate-handshakeError-tests.py @@ -6,10 +6,16 @@ Operation = namedtuple( 'Operation', ['operation_name', 'command_name', 'object', 'arguments']) +CLIENT_BULK_WRITE_ARGUMENTS = '''models: + - insertOne: + namespace: retryable-writes-handshake-tests.coll + document: { _id: 8, x: 88 }''' + CLIENT_OPERATIONS = [ Operation('listDatabases', 'listDatabases', 'client', ['filter: {}']), Operation('listDatabaseNames', 'listDatabases', 'client', []), - Operation('createChangeStream', 'aggregate', 'client', ['pipeline: []']) + Operation('createChangeStream', 'aggregate', 'client', ['pipeline: []']), + Operation('clientBulkWrite', 'bulkWrite', 'client', [CLIENT_BULK_WRITE_ARGUMENTS]) ] RUN_COMMAND_ARGUMENTS = '''command: { ping: 1 } @@ -107,6 +113,7 @@ 'findOneAndReplace', 'insertMany', 'bulkWrite', + 'clientBulkWrite' ] ] diff --git a/source/extended-json.md b/source/extended-json.md new file mode 100644 index 0000000000..56ba417695 --- /dev/null +++ b/source/extended-json.md @@ -0,0 +1,728 @@ +# Extended JSON + +- Status: Accepted +- Minimum Server Version: N/A + +______________________________________________________________________ + +## Abstract + +MongoDB Extended JSON is a string format for representing BSON documents. This specification defines the canonical +format for representing each BSON type in the Extended JSON format. Thus, a tool that implements Extended JSON will be +able to parse the output of any tool that emits Canonical Extended JSON. It also defines a Relaxed Extended JSON format +that improves readability at the expense of type information preservation. 
+ +## META + +The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and +"OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt). + +### Naming + +Acceptable naming deviations should fall within the basic style of the language. For example, `CanonicalExtendedJSON` +would be a name in Java, where camel-case method names are used, but in Ruby `canonical_extended_json` would be +acceptable. + +## Terms + +*Type wrapper object* - a JSON value consisting of an object with one or more `$`-prefixed keys that collectively encode +a BSON type and its corresponding value using only JSON value primitives. + +*Extended JSON* - A general term for one of many string formats based on the JSON standard that describes how to +represent BSON documents in JSON using standard JSON types and/or type wrapper objects. This specification gives a +formal definition to variations of such a format. + +*Relaxed Extended JSON* - A string format based on the JSON standard that describes BSON documents. Relaxed Extended +JSON emphasizes readability and interoperability at the expense of type preservation. + +*Canonical Extended JSON* - A string format based on the JSON standard that describes BSON documents. Canonical Extended +JSON emphasizes type preservation at the expense of readability and interoperability. + +*Legacy Extended JSON* - A string format based on the JSON standard that describes a BSON document. The Legacy Extended +JSON format does not describe a specific, standardized format, and many tools, drivers, and libraries implement Extended +JSON in conflicting ways. 
+ +## Specification + +### Extended JSON Format + +The Extended JSON grammar extends the JSON grammar as defined in +[section 2](https://tools.ietf.org/html/rfc7159#section-2) of the +[JSON specification](https://tools.ietf.org/html/rfc7159) by augmenting the possible JSON values as defined in +[Section 3](https://tools.ietf.org/html/rfc7159#section-3). This specification defines two formats for Extended JSON: + +- Canonical Extended JSON +- Relaxed Extended JSON + +An Extended JSON value MUST conform to one of these two formats as described in the table below. + +#### Notes on grammar + +- Key order: + - Keys within Canonical Extended JSON type wrapper objects SHOULD be emitted in the order described. + - Keys within Relaxed Extended JSON type wrapper objects are unordered. +- Terms in *italics* represent types defined elsewhere in the table or in the + [JSON specification](https://tools.ietf.org/html/rfc7159). +- JSON *numbers* (as defined in [Section 6](https://tools.ietf.org/html/rfc7159#section-6) of the JSON specification) + include both integer and floating point types. For the purpose of this document, we define the following subtypes: + - Type *integer* means a JSON *number* without *frac* or *exp* components; this is expressed in the JSON spec grammar + as `[minus] int`. + - Type *non-integer* means a JSON *number* that is not an *integer*; it must include either a *frac* or *exp* + component or both. + - Type *pos-integer* means a non-negative JSON *number* without *frac* or *exp* components; this is expressed in the + JSON spec grammar as `int`. +- A *hex string* is a JSON *string* that contains only hexadecimal digits `[0-9a-f]`. It SHOULD be emitted lower-case, + but MUST be read in a case-insensitive fashion. +- `` detail the contents of a value, including type information. +- `[Square brackets]` specify a type constraint that restricts the specification to a particular range or set of values. 
+ +#### Conversion table + +| **BSON 1.1 Type or Convention** | **Canonical Extended JSON Format** | **Relaxed Extended JSON Format** | +| ------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | +| ObjectId | {"$oid": \} | | +| Symbol | {"$symbol": _string_} | | +| String | _string_ | | +| Int32 | {"$numberInt": \<32-bit signed integer as a _string_>} | _integer_ | +| Int64 | {"$numberLong": \<64-bit signed integer as a _string_>} | _integer_ | +| Double \[finite\] | {"$numberDouble": \<64-bit signed floating point as a decimal _string_>} | _non-integer_ | +| Double \[non-finite\] | {"$numberDouble": \} | | +| Decimal128 | {"$numberDecimal": }[^1] | | +| Binary | {"$binary": {"base64": \, "subType": }} | | +| Code | {"$code": _string_} | | +| CodeWScope | {"$code": _string_, "$scope": _Document_} | | +| Document | _object_ (with Extended JSON extensions) | | +| Timestamp | {"$timestamp": {"t": _pos-integer_, "i": _pos-integer_}} | | +| Regular Expression | {"$regularExpression": {pattern: _string_, "options": \}} | | +| DBPointer | {"$dbPointer": {"$ref": \, "$id": _ObjectId_}} | | +| Datetime \[year from 1970 to 9999 inclusive\] | {"$date": {"$numberLong": \<64-bit signed integer giving millisecs relative to the epoch, as a _string_>}} | {"$date": \} | +| Datetime \[year before 1970 or after 9999\] | {"$date": {"$numberLong": \<64-bit signed integer giving millisecs relative to the 
epoch, as a _string_>}} | | +| DBRef[^6]

Note: this is not technically a BSON type, but it is a common convention. | {"$ref": , "$id": }

If the generator supports DBRefs with a database component, and the database component is nonempty:

{"$ref": ,

"$id": , "$db": }

DBRefs may also have other fields, which MUST appear after `$id` and `$db` (if supported). | | +| MinKey | {"$minKey": 1} | | +| MaxKey | {"$maxKey": 1} | | +| Undefined | {"$undefined": _true_} | | +| Array | _array_ | | +| Boolean | _true_ or _false_ | | +| Null | _null_ | | + +______________________________________________________________________ + +#### Representation of Non-finite Numeric Values + +Following the [Extended JSON format for the Decimal128 type](./bson-decimal128/decimal128.md#to-string-representation), +non-finite numeric values are encoded as follows: + +| **Value** | **String** | +| ------------------ | ----------- | +| Positive Infinity | `Infinity` | +| Negative Infinity | `-Infinity` | +| NaN (all variants) | `NaN` | + +For example, a BSON floating-point number with a value of negative infinity would be encoded as Extended JSON as +follows: + +``` +{"$numberDouble": "-Infinity"} +``` + +### Parsers + +An Extended JSON parser (hereafter just "parser") is a tool that transforms an Extended JSON string into another +representation, such as BSON or a language-native data structure. + +By default, a parser MUST accept values in either Canonical Extended JSON format or Relaxed Extended JSON format as +described in this specification. A parser MAY allow users to restrict parsing to only Canonical Extended JSON format or +only Relaxed Extended JSON format. + +A parser MAY also accept strings that adhere to other formats, such as Legacy Extended JSON formats emitted by old +versions of mongoexport or other tools, but only if explicitly configured to do so. 
+ +A parser that accepts Legacy Extended JSON MUST be configurable such that a JSON text of a MongoDB query filter +containing the [regex](https://www.mongodb.com/docs/manual/reference/operator/query/regex/) query operator can be +parsed, e.g.: + +```javascript +{ "$regex": { + "$regularExpression" : { "pattern": "foo*", "options": "" } + }, + "$options" : "ix" +} +``` + +or: + +```javascript +{ "$regex": { + "$regularExpression" : { "pattern": "foo*", "options": "" } + } +} +``` + +A parser that accepts Legacy Extended JSON MUST be configurable such that a JSON text of a MongoDB query filter +containing the [type](https://www.mongodb.com/docs/manual/reference/operator/query/type/) query operator can be parsed, +e.g.: + +```javascript +{ "zipCode" : { $type : 2 } } +``` + +or: + +```javascript +{ "zipCode" : { $type : "string" } } +``` + +A parser SHOULD support at least 200 \[levels of nesting\](#levels of nesting) in an Extended JSON document but MAY set +other limits on strings it can accept as defined in [section 9](https://tools.ietf.org/html/rfc7159#section-9) of the +[JSON specification](https://tools.ietf.org/html/rfc7159). + +When parsing a JSON object other than the top-level object, the presence of a `$`-prefixed key indicates the object +could be a type wrapper object as described in the Extended JSON [Conversion table](#conversion-table). In such a case, +the parser MUST follow these rules, unless configured to allow Legacy Extended JSON, in which case it SHOULD follow +these rules: + +- Parsers MUST NOT consider key order as having significance. For example, the document + `{"$code": "function(){}", "$scope": {}}` must be considered identical to `{"$scope": {}, "$code": "function(){}"}`. + +- If the parsed object contains any of the special **keys** for a type in the [Conversion table](#conversion-table) + (e.g. `"$binary"`, `"$timestamp"`) then it must contain exactly the keys of the type wrapper. Any missing or extra + keys constitute an error. 
+ + DBRef is the lone exception to this rule, as it is only a common convention and not a proper type. An object that + resembles a DBRef but fails to fully comply with its structure (e.g. has `$ref` but missing `$id`) MUST be left as-is + and MUST NOT constitute an error. + +- If the **keys** of the parsed object exactly match the **keys** of a type wrapper in the Conversion table, and the + **values** of the parsed object have the correct type for the type wrapper as described in the Conversion table, then + the parser MUST interpret the parsed object as a type wrapper object of the corresponding type. + +- If the **keys** of the parsed object exactly match the **keys** of a type wrapper in the Conversion table, but any of + the **values** are of an incorrect type, then the parser MUST report an error. + +- If the `$`-prefixed key does not match a known type wrapper in the Conversion table, the parser MUST NOT raise an + error and MUST leave the value as-is. See [Restrictions and limitations](#restrictions-and-limitations) for additional + information. + +#### Special rules for parsing JSON numbers + +The Relaxed Extended JSON format uses JSON numbers for several different BSON types. In order to allow parsers to use +language-native JSON decoders (which may not distinguish numeric type when parsing), the following rules apply to +parsing JSON numbers: + +- If the number is a *non-integer*, parsers SHOULD interpret it as BSON Double. +- If the number is an *integer*, parsers SHOULD interpret it as being of the smallest BSON integer type that can + represent the number exactly. If a parser is unable to represent the number exactly as an integer (e.g. a large 64-bit + number on a 32-bit platform), it MUST interpret it as a BSON Double even if this results in a loss of precision. The + parser MUST NOT interpret it as a BSON String containing a decimal representation of the number. 
+ +#### Special rules for parsing `$uuid` fields + +As per the [UUID specification](https://github.com/mongodb/specifications/blob/master/source/uuid.rst), Binary subtype 3 +or 4 are used to represent UUIDs in BSON. Consequently, UUIDs are handled as per the convention described for the +`Binary` type in the [Conversion table](#conversion-table), e.g. the following document written with the MongoDB Python +Driver: + +```javascript +{"Binary": uuid.UUID("c8edabc3-f738-4ca3-b68d-ab92a91478a3")} +``` + +is transformed into the following (newlines and spaces added for readability): + +```javascript +{"Binary": { + "$binary": { + "base64": "yO2rw/c4TKO2jauSqRR4ow==", + "subType": "04"} + } +} +``` + +> [!NOTE] +> The above described type conversion assumes that UUID representation is set to `STANDARD`. See the +> [UUID specification](https://github.com/mongodb/specifications/blob/master/source/uuid.rst) for more information about +> UUID representations. + +While this transformation preserves BSON subtype information (since UUIDs can be represented as BSON subtype 3 *or* 4), +base64-encoding is not the standard way of representing UUIDs and using it makes comparing these values against textual +representations coming from platform libraries difficult. Consequently, we also allow UUIDs to be represented in +extended JSON as: + +```javascript +{"$uuid": } +``` + +The rules for generating the canonical string representation of a UUID are defined in +[RFC 4122 Section 3](https://tools.ietf.org/html/rfc4122#section-3). Use of this format result in a more readable +extended JSON representation of the UUID from the previous example: + +```javascript +{"Binary": { + "$uuid": "c8edabc3-f738-4ca3-b68d-ab92a91478a3" + } +} +``` + +Parsers MUST interpret the `$uuid` key as BSON Binary subtype 4. Parsers MUST accept textual representations of UUIDs +that omit the URN prefix (usually `urn:uuid:`). 
Parsers MAY also accept textual representations of UUIDs that omit the
+hyphens between hex character groups (e.g. `c8edabc3f7384ca3b68dab92a91478a3`).
+
+### Generators
+
+An Extended JSON generator (hereafter just "generator") produces strings in an Extended JSON format.
+
+A generator MUST allow users to produce strings in either the Canonical Extended JSON format or the Relaxed Extended
+JSON format. If generators provide a default format, the default SHOULD be the Relaxed Extended JSON format.
+
+A generator MAY be capable of exporting strings that adhere to other formats, such as Legacy Extended JSON formats.
+
+A generator SHOULD support at least 100 [levels of nesting](#levels-of-nesting) in a BSON document.
+
+#### Transforming BSON
+
+Given a BSON document (e.g. a buffer of bytes meeting the requirements of the BSON specification), a generator MUST use
+the corresponding JSON values or Extended JSON type wrapper objects for the BSON type given in the Extended JSON
+[Conversion table](#conversion-table) for the desired format. When transforming a BSON document into Extended JSON text,
+a generator SHOULD emit the JSON keys and values in the same order as given in the BSON document.
+
+#### Transforming Language-Native data
+
+Given language-native data (e.g. type primitives, container types, classes, etc.), if there is a semantically-equivalent
+BSON type for a given language-native type, a generator MUST use the corresponding JSON values or Extended JSON type
+wrapper objects for the BSON type given in the Extended JSON [Conversion table](#conversion-table) for the desired
+format. For example, a Python `datetime` object must be represented the same as a BSON datetime type. A generator SHOULD
+error if a language-native type has no semantically-equivalent BSON type. 
+ +#### Format and Method Names + +The following format names SHOULD be used for selecting formats for generator output: + +- `canonicalExtendedJSON` (references Canonical Extended JSON as described in this specification) +- `relaxedExtendedJSON` (references Relaxed Extended JSON as described in this specification) +- `legacyExtendedJSON` (if supported: references Legacy Extended JSON, with implementation-defined behavior) + +Generators MAY use these format names as part of function/method names or MAY use them as arguments or constants, as +needed. + +If a generator provides a generic `to_json` or `to_extended_json` method, it MUST default to producing Relaxed Extended +JSON or MUST be deprecated in favor of a spec-compliant method. + +### Restrictions and limitations + +Extended JSON is designed primarily for testing and human inspection of BSON documents. It is not designed to reliably +round-trip BSON documents. One fundamental limitation is that JSON objects are inherently unordered and BSON objects are +ordered. + +Further, Extended JSON uses `$`-prefixed keys in type wrappers and has no provision for escaping a leading `$` used +elsewhere in a document. This means that the Extended JSON representation of a document with `$`-prefixed keys could be +indistinguishable from another document with a type wrapper with the same keys. + +Extended JSON formats SHOULD NOT be used in contexts where `$`-prefixed keys could exist in BSON documents (with the +exception of the DBRef convention, which is accounted for in this spec). + +## Test Plan + +Drivers, tools, and libraries can test their compliance to this specification by running the tests in version 2.0 and +above of the [BSON Corpus Test Suite](./bson-corpus/bson-corpus.md). 
+ +## Examples + +### Canonical Extended JSON Example + +Consider the following document, written with the MongoDB Python Driver: + +```javascript +{ + "_id": bson.ObjectId("57e193d7a9cc81b4027498b5"), + "String": "string", + "Int32": 42, + "Int64": bson.Int64(42), + "Double": 42.42, + "Decimal": bson.Decimal128("1234.5"), + "Binary": uuid.UUID("c8edabc3-f738-4ca3-b68d-ab92a91478a3"), + "BinaryUserDefined": bson.Binary(b'123', 80), + "Code": bson.Code("function() {}"), + "CodeWithScope": bson.Code("function() {}", scope={}), + "Subdocument": {"foo": "bar"}, + "Array": [1, 2, 3, 4, 5], + "Timestamp": bson.Timestamp(42, 1), + "RegularExpression": bson.Regex("foo*", "xi"), + "DatetimeEpoch": datetime.datetime.utcfromtimestamp(0), + "DatetimePositive": datetime.datetime.max, + "DatetimeNegative": datetime.datetime.min, + "True": True, + "False": False, + "DBRef": bson.DBRef( + "collection", bson.ObjectId("57e193d7a9cc81b4027498b1"), database="database"), + "DBRefNoDB": bson.DBRef( + "collection", bson.ObjectId("57fd71e96e32ab4225b723fb")), + "Minkey": bson.MinKey(), + "Maxkey": bson.MaxKey(), + "Null": None +} +``` + +The above document is transformed into the following (newlines and spaces added for readability): + +```javascript +{ + "_id": { + "$oid": "57e193d7a9cc81b4027498b5" + }, + "String": "string", + "Int32": { + "$numberInt": "42" + }, + "Int64": { + "$numberLong": "42" + }, + "Double": { + "$numberDouble": "42.42" + }, + "Decimal": { + "$numberDecimal": "1234.5" + }, + "Binary": { + "$binary": { + "base64": "yO2rw/c4TKO2jauSqRR4ow==", + "subType": "04" + } + }, + "BinaryUserDefined": { + "$binary": { + "base64": "MTIz", + "subType": "80" + } + }, + "Code": { + "$code": "function() {}" + }, + "CodeWithScope": { + "$code": "function() {}", + "$scope": {} + }, + "Subdocument": { + "foo": "bar" + }, + "Array": [ + {"$numberInt": "1"}, + {"$numberInt": "2"}, + {"$numberInt": "3"}, + {"$numberInt": "4"}, + {"$numberInt": "5"} + ], + "Timestamp": { + "$timestamp": 
{ "t": 42, "i": 1 } + }, + "RegularExpression": { + "$regularExpression": { + "pattern": "foo*", + "options": "ix" + } + }, + "DatetimeEpoch": { + "$date": { + "$numberLong": "0" + } + }, + "DatetimePositive": { + "$date": { + "$numberLong": "253402300799999" + } + }, + "DatetimeNegative": { + "$date": { + "$numberLong": "-62135596800000" + } + }, + "True": true, + "False": false, + "DBRef": { + "$ref": "collection", + "$id": { + "$oid": "57e193d7a9cc81b4027498b1" + }, + "$db": "database" + }, + "DBRefNoDB": { + "$ref": "collection", + "$id": { + "$oid": "57fd71e96e32ab4225b723fb" + } + }, + "Minkey": { + "$minKey": 1 + }, + "Maxkey": { + "$maxKey": 1 + }, + "Null": null +} +``` + +### Relaxed Extended JSON Example + +In Relaxed Extended JSON, the example document is transformed similarly to Canonical Extended JSON, with the exception +of the following keys (newlines and spaces added for readability): + +```javascript +{ + ... + "Int32": 42, + "Int64": 42, + "Double": 42.42, + ... + "DatetimeEpoch": { + "$date": "1970-01-01T00:00:00.000Z" + }, + ... +} +``` + +## Motivation for Change + +There existed many Extended JSON parser and generator implementations prior to this specification that used conflicting +formats, since there was no agreement on the precise format of Extended JSON. This resulted in problems where the output +of some generators could not be consumed by some parsers. + +MongoDB drivers needed a single, standard Extended JSON format for testing that covers all BSON types. However, there +were BSON types that had no defined Extended JSON representation. This spec primarily addresses that need, but provides +for slightly broader use as well. + +## Design Rationale + +### Of Relaxed and Canonical Formats + +There are various use cases for expressing BSON documents in a text rather that binary format. 
They broadly fall into
+two categories:
+
+- Type preserving: for things like testing, where one has to describe the expected form of a BSON document, it's helpful
+  to be able to precisely specify expected types. In particular, numeric types need to differentiate between Int32,
+  Int64 and Double forms.
+- JSON-like: for things like a web API, where one is sending a document (or a projection of a document) that only uses
+  ordinary JSON type primitives, it's desirable to represent numbers in the native JSON format. This output is also the
+  most human readable and is useful for debugging and documentation.
+
+The two formats in this specification address these two categories of use cases.
+
+### Of Parsers and Generators
+
+Parsers need to accept any valid Extended JSON string that a generator can produce. Parsers and generators are permitted
+to accept and output strings in other formats as well for backwards compatibility.
+
+<span id="levels-of-nesting"></span>
+
+Acceptable nesting depth has implications for resource usage so unlimited nesting is not permitted.
+
+Generators support at least 100 levels of nesting in a BSON document being transformed to Extended JSON. This aligns
+with MongoDB's own limitation of 100 levels of nesting.
+
+Parsers support at least 200 levels of nesting in Extended JSON text, since the Extended JSON language can double the
+level of apparent nesting of a BSON document by wrapping certain types in their own documents.
+
+### Of Canonical Type Wrapper Formats
+
+Prior to this specification, BSON types fell into three categories with respect to Legacy Extended JSON:
+
+1. A single, portable representation for the type already existed.
+2. Multiple representations for the type existed among various Extended JSON generators, and those representations were
+   in conflict with each other or with current portability goals.
+3. No Legacy Extended JSON representation existed. 
+
+If a BSON type fell into category (1), this specification just declares that form to be canonical, since all drivers,
+tools, and libraries already know how to parse or output this form. There are two exceptions:
+
+#### RegularExpression
+
+The form `{"$regex: <string>, $options: <string>"}` has until this specification been canonical. The change to
+`{"$regularExpression": {pattern: <string>, "options": <string>"}}` is motivated by a conflict between the previous
+canonical form and the `$regex` MongoDB query operator. The form specified here disambiguates between the two, such that
+a parser can accept any MongoDB query filter, even one containing the `$regex` operator.
+
+#### Binary
+
+The form `{"$binary": "AQIDBAU=", "$type": "80"}` has until this specification been canonical. The change to
+`{"$binary": {"base64": "AQIDBAU=", "subType": "80"}}` is motivated by a conflict between the previous canonical form
+and the `$type` MongoDB query operator. The form specified here disambiguates between the two, such that a parser can
+accept any MongoDB query filter, even one containing the `$type` operator.
+
+#### Reconciled type wrappers
+
+If a BSON type fell into category (2), this specification selects a new common representation for the type to be
+canonical. Conflicting formats were gathered by surveying a number of Extended JSON generators, including the MongoDB
+Java Driver (version 3.3.0), the MongoDB Python Driver (version 3.4.0.dev0), the MongoDB Extended JSON module on NPM
+(version 1.7.1), and each minor version of mongoexport from 2.4.14 through 3.3.12. When possible, we set the "strict"
+option on the JSON codec. The following BSON types had conflicting Extended JSON representations:
+
+##### Binary
+
+Some implementations write the Extended JSON form of a Binary object with a strict two-hexadecimal digit subtype (e.g.
+they output a leading `0` for subtypes \< 16). 
However, the NPM mongodb-extended-json module and Java driver use a
+single hexadecimal digit to represent subtypes less than 16. This specification makes both one- and two-digit
+representations acceptable.
+
+##### Code
+
+Mongoexport 2.4 does not quote the `Code` value when writing out the extended JSON form of a BSON Code object. All other
+implementations do so. This spec canonicalises the form where the Javascript code is quoted, since the latter form
+adheres to the JSON specification and the former does not. As an additional note, the NPM mongodb-extended-json module
+uses the form `{"code": "<javascript code>"}`, omitting the dollar sign (`$`) from the key. This specification does not
+accommodate the eccentricity of a single library.
+
+##### CodeWithScope
+
+In addition to the same variants as BSON Code types, there are other variations when turning CodeWithScope objects into
+Extended JSON. Mongoexport 2.4 and 2.6 omit the scope portion of CodeWithScope if it is empty, making the output
+indistinguishable from a Code type. All other implementations include the empty scope. This specification therefore
+canonicalises the form where the scope is always included. The presence of `$scope` is what differentiates Code from
+CodeWithScope.
+
+##### Datetime
+
+Mongoexport 2.4 and the Java driver always transform a Datetime object into an Extended JSON string of the form
+`{"$date": <epoch millis>}`. This form has the problem of a potential loss of precision or range on the Datetimes that
+can be represented. Mongoexport 2.6 transforms Datetime objects into an extended JSON string of the form
+`{"$date": <ISO-8601 date string in local time>}` for dates starting at or after the Unix epoch (UTC). Dates prior to
+the epoch take the form `{"$date": {"$numberLong": "<dateAsMilliseconds>"}}`. Starting in version 3.0, mongoexport
+always turns Datetime objects into strings of the form `{"$date": <ISO-8601 date string in UTC>}`. The NPM
+mongodb-extended-json module does the same. 
The Python driver can also transform Datetime objects into strings like
+`{"$date": {"$numberLong": "<dateAsMilliseconds>"}}`. This specification canonicalises this form, since this form is
+the most portable. In Relaxed Extended JSON format, this specification provides for ISO-8601 representation for better
+readability, but limits it to a portable subset, from the epoch to the end of the largest year that can be represented
+with four digits. This should encompass most typical use of dates in applications.
+
+##### DBPointer
+
+Mongoexport 2.4 and 2.6 use the form `{"$ref": <namespace>, "$id": <hex string>}`. All other implementations studied
+include the canonical `ObjectId` form: `{"$ref": <namespace>, "$id": {"$oid": <hex string>}}`. Neither of these forms
+are distinguishable from that of DBRef, so this specification creates a new format:
+`{"$dbPointer": {"$ref": <namespace>, "$id": {"$oid": <hex string>}}}`.
+
+##### Newly-added type wrappers
+
+If a BSON type fell into category (3), above, this specification creates a type wrapper format for the type. The
+following new Extended JSON type wrappers are introduced by this spec:
+
+- `$dbPointer` - See above.
+- `$numberInt` - This is used to preserve the "int32" BSON type in Canonical Extended JSON. Without using `$numberInt`,
+  this type will be indistinguishable from a double in certain languages where the distinction does not exist, such as
+  Javascript.
+- `$numberDouble` - This is used to preserve the `double` type in Canonical Extended JSON, as some JSON generators might
+  omit a trailing ".0" for integral types.\
+  It also supports representing non-finite values like NaN or Infinity which
+  are prohibited in the JSON specification for numbers.
+- `$symbol` - The use of the `$symbol` key preserves the symbol type in Canonical Extended JSON, distinguishing it from
+  JSON strings. 
+
+### Reference Implementation
+
+\[*Canonical Extended JSON format reference implementation needs to be updated*\] PyMongo implements the Canonical
+Extended JSON format, which must be chosen by selecting the right option on the `JSONOptions` object:
+
+```python
+from bson.json_util import dumps, DatetimeRepresentation, CANONICAL_JSON_OPTIONS
+dumps(document, json_options=CANONICAL_JSON_OPTIONS)
+```
+
+\[*Relaxed Extended JSON format reference implementation is TBD*\]
+
+### Implementation Notes
+
+#### JSON File Format
+
+Some applications like mongoexport may wish to write multiple Extended JSON documents to a single file. One way to do
+this is to list each JSON document one-per-line. When doing this, it is important to ensure that special characters like
+newlines are encoded properly (e.g. `\n`).
+
+#### Duplicate Keys
+
+The BSON specification does not prohibit duplicate key names within the same BSON document, but provides no semantics
+for the interpretation of duplicate keys. The JSON specification says that names within an object should be unique, and
+many JSON libraries are incapable of handling this scenario. This specification is silent on the matter, so as not to
+conflict with a future change by either specification.
+
+### Future Work
+
+This specification will need to be amended if future BSON types are added to the BSON specification.
+
+## Q&A
+
+**Q**. Why was version 2 of the spec necessary? **A**. After Version 1 was released, several stakeholders raised
+concerns that not providing an option to output BSON numbers as ordinary JSON numbers limited the utility of Extended
+JSON for common historical uses. We decided to provide a second format option and more clearly distinguish the use cases
+(and limitations) inherent in each format.
+
+**Q**. My BSON parser doesn't distinguish every BSON type. Does my Extended JSON generator need to distinguish these
+types?\
+**A**. No. 
Some BSON parsers do not emit a unique type for each BSON type, making round-tripping BSON through
+such libraries impossible without changing the document. For example, a `DBPointer` will be parsed into a `DBRef` by
+PyMongo. In such cases, a generator must emit the Extended JSON form for whatever type the BSON parser emitted. It does
+not need to preserve type information when that information has been lost by the BSON parser.
+
+**Q**. How can implementations which require backwards compatibility with Legacy Extended JSON, in which BSON regular
+expressions were represented with `$regex`, handle parsing of extended JSON text representing a MongoDB query filter
+containing the `$regex` operator?\
+**A**. An implementation can handle this in a number of ways: - Introduce an
+enumeration that determines the behavior of the parser. If the value is LEGACY, it will parse `$regex` and not treat
+`$regularExpression` specially, and if the value is CANONICAL, it will parse `$regularExpression` and not treat `$regex`
+specially. - Support both legacy and canonical forms in the parser without requiring the application to specify one or
+the other. Making that work for the `$regex` query operator use case will require that the rules set forth in the 1.0.0
+version of this specification are followed for `$regex`; specifically, that a document with a `$regex` key whose value
+is a JSON object should be parsed as a normal document and not reported as an error.
+
+**Q**. How can implementations which require backwards compatibility with Legacy Extended JSON, in which BSON binary
+values were represented like `{"$binary": "AQIDBAU=", "$type": "80"}`, handle parsing of extended JSON text representing
+a MongoDB query filter containing the `$type` operator?\
+**A**. An implementation can handle this in a number of ways:\
+\-
+Introduce an enumeration that determines the behavior of the parser. 
If the value is LEGACY, it will parse the new
+binary form and not treat the legacy one specially, and if the value is CANONICAL, it will parse the new form and not
+treat the legacy form specially. - Support both legacy and canonical forms in the parser without requiring the
+application to specify one or the other. Making that work for the `$type` query operator use case will require that the
+rules set forth in the 1.0.0 version of this specification are followed for `$type`; specifically, that a document with
+a `$type` key whose value is an integral type, or a document with a `$type` key but without a `$binary` key, should be
+parsed as a normal document and not reported as an error.
+
+**Q**. Sometimes I see the term "extjson" used in other specifications. Is "extjson" related to this
+specification?\
+**A**. Yes, "extjson" is short for "Extended JSON".
+
+### Changelog
+
+- 2024-05-29: Migrated from reStructuredText to Markdown.
+- 2022-10-05: Remove spec front matter and reformat changelog.
+- 2021-05-26:
+  - Remove any mention of extra dollar-prefixed keys being prohibited in a DBRef. MongoDB 5.0 and compatible drivers no
+    longer enforce such restrictions.
+  - Objects that resemble a DBRef without fully complying to its structure should be left as-is during parsing.
+- 2020-09-01: Note that `$`-prefixed keys not matching a known type MUST be left as-is when parsing. This is a
+  patch-level change as this behavior was already required in the BSON corpus tests ("Document with keys that start
+  with $").
+- 2020-09-08:
+  - Added support for parsing `$uuid` fields as BSON Binary subtype 4.
+  - Changed the example to use the MongoDB Python Driver. It previously used the MongoDB Java Driver. The new example
+    excludes the following BSON types that are unsupported in Python - `Symbol`, `SpecialFloat`, `DBPointer`, and
+    `Undefined`. Transformations for these types are now only documented in the [Conversion table](#conversion-table). 
+- 2017-07-20: + - Bumped specification to version 2.0. + - Added "Relaxed" format. + - Changed BSON timestamp type wrapper back to `{"t": *int*, "i": *int*}` for backwards compatibility. (The change in + v1 to unsigned 64-bit string was premature optimization) + - Changed BSON regular expression type wrapper to `{"$regularExpression": {pattern: *string*, "options": *string*"}}`. + - Changed BSON binary type wrapper to + `{"$binary": {"base64": , "subType": }}` + - Added "Restrictions and limitations" section. + - Clarified parser and generator rules. +- 2017-02-01: Initial specification version 1.0. + +[^1]: This MUST conform to the [Decimal128 specification](./bson-decimal128/decimal128.md#writing-to-extended-json) + +[^2]: BSON Regular Expression options MUST be in alphabetical order. + +[^3]: See [the docs manual](https://www.mongodb.com/docs/manual/reference/glossary/#term-namespace) + +[^4]: See [https://tools.ietf.org/html/rfc3339#section-5.6](https://tools.ietf.org/html/rfc3339#section-5.6) + +[^5]: Fractional seconds SHOULD have exactly 3 decimal places if the fractional part is non-zero. Otherwise, fractional + seconds SHOULD be omitted if zero. + +[^6]: See [the docs manual](https://www.mongodb.com/docs/manual/reference/database-references/#dbrefs) diff --git a/source/extended-json.rst b/source/extended-json.rst index 9553f45f79..fc32e1d430 100644 --- a/source/extended-json.rst +++ b/source/extended-json.rst @@ -1,956 +1,4 @@ -============= -Extended JSON -============= -:Status: Accepted -:Minimum Server Version: N/A - -.. contents:: - --------- - -Abstract -======== - -MongoDB Extended JSON is a string format for representing BSON documents. This -specification defines the canonical format for representing each BSON type in -the Extended JSON format. Thus, a tool that implements Extended JSON will be -able to parse the output of any tool that emits Canonical Extended JSON. 
It also -defines a Relaxed Extended JSON format that improves readability at the expense -of type information preservation. - -META -==== - -The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", -"SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be -interpreted as described in `RFC 2119 `_. - -Naming ------- - -Acceptable naming deviations should fall within the basic style of the -language. For example, ``CanonicalExtendedJSON`` would be a name in Java, where -camel-case method names are used, but in Ruby ``canonical_extended_json`` would -be acceptable. - -Terms -===== - -*Type wrapper object* - a JSON value consisting of an object with one or more -``$``-prefixed keys that collectively encode a BSON type and its corresponding -value using only JSON value primitives. - -*Extended JSON* - A general term for one of many string formats based on the -JSON standard that describes how to represent BSON documents in JSON using -standard JSON types and/or type wrapper objects. This specification gives a -formal definition to variations of such a format. - -*Relaxed Extended JSON* - A string format based on the JSON standard that -describes BSON documents. Relaxed Extended JSON emphasizes readability and -interoperability at the expense of type preservation. - -*Canonical Extended JSON* - A string format based on the JSON standard that -describes BSON documents. Canonical Extended JSON emphasizes type preservation -at the expense of readability and interoperability. - -*Legacy Extended JSON* - A string format based on the JSON standard that -describes a BSON document. The Legacy Extended JSON format does not describe -a specific, standardized format, and many tools, drivers, and libraries -implement Extended JSON in conflicting ways. 
- -Specification -============= - -Extended JSON Format --------------------- - -The Extended JSON grammar extends the JSON grammar as defined in `section 2`_ of -the `JSON specification`_ by augmenting the possible JSON values as defined in -`Section 3`_. This specification defines two formats for Extended JSON: - -* Canonical Extended JSON -* Relaxed Extended JSON - -An Extended JSON value MUST conform to one of these two formats as described -in the table below. - -.. _section 2: https://tools.ietf.org/html/rfc7159#section-2 -.. _section 3: https://tools.ietf.org/html/rfc7159#section-3 - -Notes on grammar -................ - -* Key order: - - * Keys within Canonical Extended JSON type wrapper objects SHOULD be emitted - in the order described. - - * Keys within Relaxed Extended JSON type wrapper objects are unordered. - -* Terms in *italics* represent types defined elsewhere in the table or in the - `JSON specification`_. - -* JSON *numbers* (as defined in `Section 6`_ of the JSON specification) include - both integer and floating point types. For the purpose of this document, we - define the following subtypes: - - * Type *integer* means a JSON *number* without *frac* or *exp* components; - this is expressed in the JSON spec grammar as ``[minus] int``. - - * Type *non-integer* means a JSON *number* that is not an *integer*; it - must include either a *frac* or *exp* component or both. - - * Type *pos-integer* means a non-negative JSON *number* without *frac* - or *exp* components; this is expressed in the JSON spec grammar as ``int``. - -* A *hex string* is a JSON *string* that contains only hexadecimal - digits [0-9a-f]. It SHOULD be emitted lower-case, but MUST be read - in a case-insensitive fashion. - -* detail the contents of a value, including type information. - -* \[Square brackets\] specify a type constraint that restricts the specification - to a particular range or set of values. - -.. 
_section 6: https://tools.ietf.org/html/rfc7159#section-6 - -Conversion table -................ - -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|**BSON 1.1 Type or |**Canonical Extended JSON Format** |**Relaxed Extended JSON Format** | -|Convention** | | | -+====================+==========================================================+=======================================================+ -|ObjectId |{"$oid": | -| |string*>} | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Symbol |{"$symbol": *string*} | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|String |*string* | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Int32 |{"$numberInt": <32-bit signed integer as a *string*>} | *integer* | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Int64 |{"$numberLong": <64-bit signed integer as a *string*>} | *integer* | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Double \[finite\] |{"$numberDouble": <64-bit signed floating point as a | *non-integer* | -| |decimal *string*>} | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Double |{"$numberDouble": | -|\[non-finite\] |"-Infinity", or "NaN">} | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Decimal128 |{"$numberDecimal": } [#]_ | | 
-+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Binary |{"$binary": {"base64": | -| |``=``) payload as a *string*>, "subType": }} | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Code |{"$code": *string*} | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|CodeWScope |{"$code": *string*, "$scope": *Document*} | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Document |*object* (with Extended JSON extensions) | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Timestamp |{"$timestamp": {"t": *pos-integer*, "i": *pos-integer*}} | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Regular Expression |{"$regularExpression": {pattern: *string*, | | -| |"options": }} | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|DBPointer |{"$dbPointer": {"$ref": , | | -| |"$id": *ObjectId*}} | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Datetime |{"$date": {"$numberLong": <64-bit signed integer | {"$date": }} | as described in RFC-3339 [#]_ with maximum time | -|to 9999 inclusive\] | | precision of milliseconds [#]_ as a *string*>} | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Datetime |{"$date": {"$numberLong": <64-bit signed integer | | 
-|\[year before 1970 |giving millisecs relative to the epoch, as a *string*>}} | | -|or after 9999\] | | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|DBRef [#]_ |{"$ref": , "$id": | | -| |} | | -|Note: this is not | | | -|technically a BSON |If the generator supports DBRefs with a database | | -|type, but it is a |component, and the database component is nonempty: | | -|common convention. | | | -| |{"$ref": , | | -| | "$id": , | | -| | "$db": } | | -| | | | -| |DBRefs may also have other fields, which MUST appear after| | -| |``$id`` and ``$db`` (if supported). | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|MinKey |{"$minKey": 1} | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|MaxKey |{"$maxKey": 1} | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Undefined |{"$undefined": *true*} | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Array |*array* | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Boolean |*true* or *false* | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ -|Null |*null* | | -+--------------------+----------------------------------------------------------+-------------------------------------------------------+ - -.. [#] This MUST conform to the `Decimal128 specification`_ - -.. [#] BSON Regular Expression options MUST be in alphabetical order. - -.. 
[#] See https://www.mongodb.com/docs/manual/reference/glossary/#term-namespace - -.. [#] See https://tools.ietf.org/html/rfc3339#section-5.6 - -.. [#] Fractional seconds SHOULD have exactly 3 decimal places if the fractional part - is non-zero. Otherwise, fractional seconds SHOULD be omitted if zero. - -.. [#] See https://www.mongodb.com/docs/manual/reference/database-references/#dbrefs - -.. _Decimal128 specification: ./bson-decimal128/decimal128.md#writing-to-extended-json - -Representation of Non-finite Numeric Values -........................................... - -Following the `Extended JSON format for the Decimal128 type`_, non-finite numeric -values are encoded as follows: - -+----------------------------------------+----------------------------------------+ -|**Value** |**String** | -+========================================+========================================+ -|Positive Infinity |``Infinity`` | -+----------------------------------------+----------------------------------------+ -|Negative Infinity |``-Infinity`` | -+----------------------------------------+----------------------------------------+ -|NaN (all variants) |``NaN`` | -+----------------------------------------+----------------------------------------+ - -.. _Extended JSON format for the Decimal128 type: ./bson-decimal128/decimal128.md#to-string-representation - -For example, a BSON floating-point number with a value of negative infinity -would be encoded as Extended JSON as follows:: - - {"$numberDouble": "-Infinity"} - -Parsers -------- - -An Extended JSON parser (hereafter just "parser") is a tool that transforms an -Extended JSON string into another representation, such as BSON or a -language-native data structure. - -By default, a parser MUST accept values in either Canonical Extended JSON -format or Relaxed Extended JSON format as described in this specification. A -parser MAY allow users to restrict parsing to only Canonical Extended JSON -format or only Relaxed Extended JSON format. 
- -A parser MAY also accept strings that adhere to other formats, such as -Legacy Extended JSON formats emitted by old versions of mongoexport or -other tools, but only if explicitly configured to do so. - -A parser that accepts Legacy Extended JSON MUST be configurable such that a JSON -text of a MongoDB query filter containing the `regex`_ query operator can be -parsed, e.g.:: - - { "$regex": { - "$regularExpression" : { "pattern": "foo*", "options": "" } - }, - "$options" : "ix" - } - -or:: - - { "$regex": { - "$regularExpression" : { "pattern": "foo*", "options": "" } - } - } - -A parser that accepts Legacy Extended JSON MUST be configurable such that a JSON -text of a MongoDB query filter containing the `type`_ query operator can be -parsed, e.g.:: - - { "zipCode" : { $type : 2 } } - -or:: - - { "zipCode" : { $type : "string" } } - -A parser SHOULD support at least 200 `levels of nesting`_ in an Extended JSON -document but MAY set other limits on strings it can accept as defined in -`section 9`_ of the `JSON specification`_. - -When parsing a JSON object other than the top-level object, the presence of a -``$``-prefixed key indicates the object could be a type wrapper object as -described in the Extended JSON `Conversion table`_. In such a case, the parser -MUST follow these rules, unless configured to allow Legacy Extended JSON, -in which case it SHOULD follow these rules: - -* Parsers MUST NOT consider key order as having significance. For example, - the document ``{"$code": "function(){}", "$scope": {}}`` must be considered - identical to ``{"$scope": {}, "$code": "function(){}"}``. - -* If the parsed object contains any of the special **keys** for a type in the - `Conversion table`_ (e.g. ``"$binary"``, ``"$timestamp"``) then it must - contain exactly the keys of the type wrapper. Any missing or extra keys - constitute an error. - - DBRef is the lone exception to this rule, as it is only a common convention - and not a proper type. 
An object that resembles a DBRef but fails to fully - comply with its structure (e.g. has ``$ref`` but missing ``$id``) MUST be left - as-is and MUST NOT constitute an error. - -* If the **keys** of the parsed object exactly match the **keys** of a type - wrapper in the Conversion table, and the **values** of the parsed object have - the correct type for the type wrapper as described in the Conversion table, - then the parser MUST interpret the parsed object as a type wrapper object of - the corresponding type. - -* If the **keys** of the parsed object exactly match the **keys** of a type - wrapper in the Conversion table, but any of the **values** are of an incorrect - type, then the parser MUST report an error. - -* If the ``$``-prefixed key does not match a known type wrapper in the - Conversion table, the parser MUST NOT raise an error and MUST leave the value - as-is. See `Restrictions and limitations`_ for additional information. - -.. _regex: https://www.mongodb.com/docs/manual/reference/operator/query/regex/ - -.. _type: https://www.mongodb.com/docs/manual/reference/operator/query/type/ - -.. _section 9: https://tools.ietf.org/html/rfc7159#section-9 - -.. _JSON specification: https://tools.ietf.org/html/rfc7159 - -Special rules for parsing JSON numbers -...................................... - -The Relaxed Extended JSON format uses JSON numbers for several different -BSON types. In order to allow parsers to use language-native JSON decoders -(which may not distinguish numeric type when parsing), the following rules apply -to parsing JSON numbers: - -* If the number is a *non-integer*, parsers SHOULD interpret it as BSON Double. - -* If the number is an *integer*, parsers SHOULD interpret it as being of the - smallest BSON integer type that can represent the number exactly. If a parser - is unable to represent the number exactly as an integer (e.g. 
a large 64-bit - number on a 32-bit platform), it MUST interpret it as a BSON Double even if - this results in a loss of precision. The parser MUST NOT interpret it as a - BSON String containing a decimal representation of the number. - -Special rules for parsing ``$uuid`` fields -.......................................... - -As per the `UUID specification`_, Binary subtype 3 or 4 are used to -represent UUIDs in BSON. Consequently, UUIDs are handled as per the -convention described for the ``Binary`` type in the `Conversion table`_, -e.g. the following document written with the MongoDB Python Driver:: - - {"Binary": uuid.UUID("c8edabc3-f738-4ca3-b68d-ab92a91478a3")} - -is transformed into the following (newlines and spaces added for readability):: - - {"Binary": { - "$binary": { - "base64": "yO2rw/c4TKO2jauSqRR4ow==", - "subType": "04"} - } - } - -.. note:: The above described type conversion assumes that - UUID representation is set to ``STANDARD``. See the `UUID specification`_ - for more information about UUID representations. - -While this transformation preserves BSON subtype information (since -UUIDs can be represented as BSON subtype 3 *or* 4), base64-encoding -is not the standard way of representing UUIDs and using it makes comparing -these values against textual representations coming from platform libraries -difficult. Consequently, we also allow UUIDs to be represented in extended -JSON as:: - - {"$uuid": } - -The rules for generating the canonical string representation of a -UUID are defined in -`RFC 4122 Section 3 `_. -Use of this format result in a more readable extended JSON -representation of the UUID from the previous example:: - - {"Binary": { - "$uuid": "c8edabc3-f738-4ca3-b68d-ab92a91478a3" - } - } - -Parsers MUST interpret the ``$uuid`` key as BSON Binary subtype 4. -Parsers MUST accept textual representations of UUIDs that omit the -URN prefix (usually ``urn:uuid:``). 
Parsers MAY also accept textual -representations of UUIDs that omit the hyphens between hex character -groups (e.g. ``c8edabc3f7384ca3b68dab92a91478a3``). - -.. _UUID specification: https://github.com/mongodb/specifications/blob/master/source/uuid.rst - -Generators ----------- - -An Extended JSON generator (hereafter just "generator") produces strings in an -Extended JSON format. - -A generator MUST allow users to produce strings in either the Canonical -Extended JSON format or the Relaxed Extended JSON format. If generators -provide a default format, the default SHOULD be the Relaxed Extended JSON -format. - -A generator MAY be capable of exporting strings that adhere to other -formats, such as Legacy Extended JSON formats. - -A generator SHOULD support at least 100 `levels of nesting`_ in a BSON -document. - -Transforming BSON -................. - -Given a BSON document (e.g. a buffer of bytes meeting the requirements of the -BSON specification), a generator MUST use the corresponding JSON values or -Extended JSON type wrapper objects for the BSON type given in the Extended JSON -`Conversion table`_ for the desired format. When transforming a BSON document -into Extended JSON text, a generator SHOULD emit the JSON keys and values in -the same order as given in the BSON document. - -Transforming Language-Native data -................................. - -Given language-native data (e.g. type primitives, container types, classes, -etc.), if there is a semantically-equivalent BSON type for a given -language-native type, a generator MUST use the corresponding JSON values or -Extended JSON type wrapper objects for the BSON type given in the Extended JSON -`Conversion table`_ for the desired format. For example, a Python ``datetime`` -object must be represented the same as a BSON datetime type. A generator -SHOULD error if a language-native type has no semantically-equivalent BSON -type. - -Format and Method Names -....................... 
- -The following format names SHOULD be used for selecting formats for generator -output: - -* ``canonicalExtendedJSON`` (references Canonical Extended JSON as described in - this specification) - -* ``relaxedExtendedJSON`` (references Relaxed Extended JSON as described in - this specification) - -* ``legacyExtendedJSON`` (if supported: references Legacy Extended JSON, - with implementation-defined behavior) - -Generators MAY use these format names as part of function/method names or MAY -use them as arguments or constants, as needed. - -If a generator provides a generic `to_json` or `to_extended_json` method, it -MUST default to producing Relaxed Extended JSON or MUST be deprecated in -favor of a spec-compliant method. - -Restrictions and limitations ----------------------------- - -Extended JSON is designed primarily for testing and human inspection of BSON -documents. It is not designed to reliably round-trip BSON documents. One -fundamental limitation is that JSON objects are inherently unordered and -BSON objects are ordered. - -Further, Extended JSON uses ``$``-prefixed keys in type wrappers and has no -provision for escaping a leading ``$`` used elsewhere in a document. This -means that the Extended JSON representation of a document with ``$``-prefixed -keys could be indistinguishable from another document with a type wrapper with -the same keys. - -Extended JSON formats SHOULD NOT be used in contexts where ``$``-prefixed keys -could exist in BSON documents (with the exception of the DBRef convention, -which is accounted for in this spec). - -Test Plan -========= - -Drivers, tools, and libraries can test their compliance to this specification by -running the tests in version 2.0 and above of the `BSON Corpus Test Suite`_. - -.. 
_BSON Corpus Test Suite: https://github.com/mongodb/specifications/blob/master/source/bson-corpus/bson-corpus.rst - -Examples -======== - -Canonical Extended JSON Example -------------------------------- - -Consider the following document, written with the MongoDB Python Driver:: - - { - "_id": bson.ObjectId("57e193d7a9cc81b4027498b5"), - "String": "string", - "Int32": 42, - "Int64": bson.Int64(42), - "Double": 42.42, - "Decimal": bson.Decimal128("1234.5"), - "Binary": uuid.UUID("c8edabc3-f738-4ca3-b68d-ab92a91478a3"), - "BinaryUserDefined": bson.Binary(b'123', 80), - "Code": bson.Code("function() {}"), - "CodeWithScope": bson.Code("function() {}", scope={}), - "Subdocument": {"foo": "bar"}, - "Array": [1, 2, 3, 4, 5], - "Timestamp": bson.Timestamp(42, 1), - "RegularExpression": bson.Regex("foo*", "xi"), - "DatetimeEpoch": datetime.datetime.utcfromtimestamp(0), - "DatetimePositive": datetime.datetime.max, - "DatetimeNegative": datetime.datetime.min, - "True": True, - "False": False, - "DBRef": bson.DBRef( - "collection", bson.ObjectId("57e193d7a9cc81b4027498b1"), database="database"), - "DBRefNoDB": bson.DBRef( - "collection", bson.ObjectId("57fd71e96e32ab4225b723fb")), - "Minkey": bson.MinKey(), - "Maxkey": bson.MaxKey(), - "Null": None - } - -The above document is transformed into the following (newlines and spaces added -for readability):: - - { - "_id": { - "$oid": "57e193d7a9cc81b4027498b5" - }, - "String": "string", - "Int32": { - "$numberInt": "42" - }, - "Int64": { - "$numberLong": "42" - }, - "Double": { - "$numberDouble": "42.42" - }, - "Decimal": { - "$numberDecimal": "1234.5" - }, - "Binary": { - "$binary": { - "base64": "yO2rw/c4TKO2jauSqRR4ow==", - "subType": "04" - } - }, - "BinaryUserDefined": { - "$binary": { - "base64": "MTIz", - "subType": "80" - } - }, - "Code": { - "$code": "function() {}" - }, - "CodeWithScope": { - "$code": "function() {}", - "$scope": {} - }, - "Subdocument": { - "foo": "bar" - }, - "Array": [ - {"$numberInt": "1"}, - 
{"$numberInt": "2"}, - {"$numberInt": "3"}, - {"$numberInt": "4"}, - {"$numberInt": "5"} - ], - "Timestamp": { - "$timestamp": { "t": 42, "i": 1 } - }, - "RegularExpression": { - "$regularExpression": { - "pattern": "foo*", - "options": "ix" - } - }, - "DatetimeEpoch": { - "$date": { - "$numberLong": "0" - } - }, - "DatetimePositive": { - "$date": { - "$numberLong": "253402300799999" - } - }, - "DatetimeNegative": { - "$date": { - "$numberLong": "-62135596800000" - } - }, - "True": true, - "False": false, - "DBRef": { - "$ref": "collection", - "$id": { - "$oid": "57e193d7a9cc81b4027498b1" - }, - "$db": "database" - }, - "DBRefNoDB": { - "$ref": "collection", - "$id": { - "$oid": "57fd71e96e32ab4225b723fb" - } - }, - "Minkey": { - "$minKey": 1 - }, - "Maxkey": { - "$maxKey": 1 - }, - "Null": null - } - - -Relaxed Extended JSON Example ------------------------------ - -In Relaxed Extended JSON, the example document is transformed similarly -to Canonical Extended JSON, with the exception of the following -keys (newlines and spaces added for readability):: - - { - ... - "Int32": 42, - "Int64": 42, - "Double": 42.42, - ... - "DatetimeEpoch": { - "$date": "1970-01-01T00:00:00.000Z" - }, - ... - } - -Motivation for Change -===================== - -There existed many Extended JSON parser and generator implementations prior to -this specification that used conflicting formats, since there was no agreement -on the precise format of Extended JSON. This resulted in problems where the -output of some generators could not be consumed by some parsers. - -MongoDB drivers needed a single, standard Extended JSON format for testing that -covers all BSON types. However, there were BSON types that had no defined -Extended JSON representation. This spec primarily addresses that need, but -provides for slightly broader use as well. 
- -Design Rationale -================ - -Of Relaxed and Canonical Formats --------------------------------- - -There are various use cases for expressing BSON documents in a text rather -that binary format. They broadly fall into two categories: - -* Type preserving: for things like testing, where one has to describe the - expected form of a BSON document, it's helpful to be able to precisely - specify expected types. In particular, numeric types need to differentiate - between Int32, Int64 and Double forms. - -* JSON-like: for things like a web API, where one is sending a document (or a - projection of a document) that only uses ordinary JSON type primitives, it's - desirable to represent numbers in the native JSON format. This output is - also the most human readable and is useful for debugging and documentation. - -The two formats in this specification address these two categories of use cases. - -Of Parsers and Generators -------------------------- - -Parsers need to accept any valid Extended JSON string that a generator can -produce. Parsers and generators are permitted to accept and output strings in -other formats as well for backwards compatibility. - -.. _levels of nesting: - -Acceptable nesting depth has implications for resource usage so unlimited -nesting is not permitted. - -Generators support at least 100 levels of nesting in a BSON document -being transformed to Extended JSON. This aligns with MongoDB's own limitation of -100 levels of nesting. - -Parsers support at least 200 levels of nesting in Extended JSON text, -since the Extended JSON language can double the level of apparent nesting of a -BSON document by wrapping certain types in their own documents. - -Of Canonical Type Wrapper Formats ---------------------------------- - -Prior to this specification, BSON types fell into three categories with respect -to Legacy Extended JSON: - -1. A single, portable representation for the type already existed. - -2. 
Multiple representations for the type existed among various Extended JSON - generators, and those representations were in conflict with each other or - with current portability goals. - -3. No Legacy Extended JSON representation existed. - -If a BSON type fell into category (1), this specification just declares that -form to be canonical, since all drivers, tools, and libraries already know how -to parse or output this form. There are two exceptions: - -RegularExpression -................. - -The form ``{"$regex: , $options: "}`` has until this -specification been canonical. The change to ``{"$regularExpression": -{pattern: , "options": "}}`` is motivated by a conflict between -the previous canonical form and the ``$regex`` MongoDB query operator. The form -specified here disambiguates between the two, such that a parser can accept any -MongoDB query filter, even one containing the ``$regex`` operator. - -Binary -...... - -The form ``{"$binary": "AQIDBAU=", "$type": "80"}`` has until this specification -been canonical. The change to ``{"$binary": {"base64": "AQIDBAU=", "subType": -"80"}}`` is motivated by a conflict between the previous canonical form and the -``$type`` MongoDB query operator. The form specified here disambiguates between -the two, such that a parser can accept any MongoDB query filter, even one -containing the ``$type`` operator. - -Reconciled type wrappers -........................ - -If a BSON type fell into category (2), this specification selects a new common -representation for the type to be canonical. Conflicting formats were gathered -by surveying a number of Extended JSON generators, including the MongoDB Java -Driver (version 3.3.0), the MongoDB Python Driver (version 3.4.0.dev0), the -MongoDB Extended JSON module on NPM (version 1.7.1), and each minor version of -mongoexport from 2.4.14 through 3.3.12. When possible, we set the "strict" -option on the JSON codec. 
The following BSON types had conflicting Extended JSON -representations: - -Binary -'''''' - -Some implementations write the Extended JSON form of a Binary object with a -strict two-hexadecimal digit subtype (e.g. they output a leading ``0`` for -subtypes < 16). However, the NPM mongodb-extended-json module and Java driver -use a single hexadecimal digit to represent subtypes less than 16. This -specification makes both one- and two-digit representations acceptable. - -Code -'''' - -Mongoexport 2.4 does not quote the ``Code`` value when writing out the extended -JSON form of a BSON Code object. All other implementations do so. This spec -canonicalises the form where the Javascript code is quoted, since the latter -form adheres to the JSON specification and the former does not. As an -additional note, the NPM mongodb-extended-json module uses the form ``{"code": -""}, omitting the dollar sign (``$``) from the key. This -specification does not accommodate the eccentricity of a single library. - -CodeWithScope -''''''''''''' - -In addition to the same variants as BSON Code types, there are other variations -when turning CodeWithScope objects into Extended JSON. Mongoexport 2.4 and 2.6 -omit the scope portion of CodeWithScope if it is empty, making the output -indistinguishable from a Code type. All other implementations include the empty -scope. This specification therefore canonicalises the form where the scope is -always included. The presence of ``$scope`` is what differentiates Code from -CodeWithScope. - -Datetime -'''''''' - -Mongoexport 2.4 and the Java driver always transform a Datetime object into an -Extended JSON string of the form ``{"$date": }``. This form has -the problem of a potential loss of precision or range on the Datetimes that can -be represented. Mongoexport 2.6 transforms Datetime objects into an extended -JSON string of the form ``{"$date": }`` for -dates starting at or after the Unix epoch (UTC). 
Dates prior to the epoch take -the form ``{"$date": {"$numberLong": ""}}``. Starting in version -3.0, mongoexport always turns Datetime objects into strings of the form -``{"$date": }``. The NPM mongodb-extended-json -module does the same. The Python driver can also transform Datetime objects into -strings like ``{"$date": {"$numberLong": ""}}``. This -specification canonicalises this form, since this form is the most portable. - -In Relaxed Extended JSON format, this specification provides for ISO-8601 -representation for better readability, but limits it to a portable subset, from -the epoch to the end of the largest year that can be represented with four -digits. This should encompass most typical use of dates in applications. - -DBPointer -''''''''' - -Mongoexport 2.4 and 2.6 use the form ``{"$ref": , "$id": }``. All other implementations studied include the canonical ``ObjectId`` -form: ``{"$ref": , "$id": {"$oid": }}``. Neither of these -forms are distinguishable from that of DBRef, so this specification creates a -new format: ``{"$dbPointer": {"$ref": , "$id": {"$oid": }}}``. - -Newly-added type wrappers -......................... - -If a BSON type fell into category (3), above, this specification creates a type -wrapper format for the type. The following new Extended JSON type wrappers are -introduced by this spec: - -* ``$dbPointer`` - See above. - -* ``$numberInt`` - This is used to preserve the "int32" BSON type in Canonical - Extended JSON. Without using ``$numberInt``, this type will be - indistinguishable from a double in certain languages where the distinction - does not exist, such as Javascript. - -* ``$numberDouble`` - This is used to preserve the ``double`` type in Canonical - Extended JSON, as some JSON generators might omit a trailing ".0" for - integral types. It also supports representing non-finite values like NaN or - Infinity which are prohibited in the JSON specification for numbers. 
- -* ``$symbol`` - The use of the ``$symbol`` key preserves the symbol type in - Canonical Extended JSON, distinguishing it from JSON strings. - -Reference Implementation -======================== - -[*Canonical Extended JSON format reference implementation needs to be updated*] - -PyMongo implements the Canonical Extended JSON format, which must be chosen by -selecting the right option on the ``JSONOptions`` object:: - - from bson.json_util import dumps, DatetimeRepresentation, CANONICAL_JSON_OPTIONS - - dumps(document, json_options=CANONICAL_JSON_OPTIONS) - -[*Relaxed Extended JSON format reference implementation is TBD*] - -Implementation Notes -==================== - -JSON File Format ----------------- - -Some applications like mongoexport may wish to write multiple Extended JSON -documents to a single file. One way to do this is to list each JSON document -one-per-line. When doing this, it is important to ensure that special characters -like newlines are encoded properly (e.g. ``\n``). - -Duplicate Keys --------------- - -The BSON specification does not prohibit duplicate key names within the same -BSON document, but provides no semantics for the interpretation of duplicate -keys. The JSON specification says that names within an object should be unique, -and many JSON libraries are incapable of handling this scenario. - -This specification is silent on the matter, so as not to conflict with a future -change by either specification. - -Future Work -=========== - -This specification will need to be amended if future BSON types are added to the -BSON specification. - -Q&A -=== - -**Q**. Why was version 2 of the spec necessary? - -**A**. After Version 1 was released, several stakeholders raised concerns that -not providing an option to output BSON numbers as ordinary JSON numbers limited -the utility of Extended JSON for common historical uses. 
We decided to provide -a second format option and more clearly distinguish the use cases (and -limitations) inherent in each format. - -**Q**. My BSON parser doesn't distinguish every BSON type. Does my Extended -JSON generator need to distinguish these types? - -**A**. No. Some BSON parsers do not emit a unique type for each BSON type, -making round-tripping BSON through such libraries impossible without changing -the document. For example, a ``DBPointer`` will be parsed into a ``DBRef`` by -PyMongo. In such cases, a generator must emit the Extended JSON form for -whatever type the BSON parser emitted. It does not need to preserve type -information when that information has been lost by the BSON parser. - -**Q**. How can implementations which require backwards compatibility with Legacy -Extended JSON, in which BSON regular expressions were represented with -``$regex``, handle parsing of extended JSON test representing a MongoDB query -filter containing the ``$regex`` operator? - -**A**. An implementation can handle this in a number of ways: - -- Introduce an enumeration that determines the behavior of the parser. If the - value is LEGACY, it will parse ``$regex`` and not treat ``$regularExpression`` - specially, and if the value is CANONICAL, it will parse ``$regularExpression`` - and not treat ``$regex`` specially. -- Support both legacy and canonical forms in the parser without requiring the - application to specify one or the other. Making that work for the ``$regex`` - query operator use case will require that the rules set forth in the 1.0.0 - version of this specification are followed for ``$regex``; specifically, that - a document with a ``$regex`` key whose value is a JSON object should be - parsed as a normal document and not reported as an error. - -**Q**. 
How can implementations which require backwards compatibility with Legacy -Extended JSON, in which BSON binary values were represented like ``{"$binary": -"AQIDBAU=", "$type": "80"}``, handle parsing of extended JSON test representing -a MongoDB query filter containing the ``$type`` operator? - -**A**. An implementation can handle this in a number of ways: - -- Introduce an enumeration that determines the behavior of the parser. If the - value is LEGACY, it will parse the new binary form and not treat the legacy - one specially, and if the value is CANONICAL, it will parse the new form and - not treat the legacy form specially. -- Support both legacy and canonical forms in the parser without requiring the - application to specify one or the other. Making that work for the ``$type`` - query operator use case will require that the rules set forth in the 1.0.0 - version of this specification are followed for ``$type``; specifically, that - a document with a ``$type`` key whose value is an integral type, or a - document with a ``$type`` key but without a ``$binary`` key, should be - parsed as a normal document and not reported as an error. - -**Q**. Sometimes I see the term "extjson" used in other specifications. Is -"extjson" related to this specification? - -**A**. Yes, "extjson" is short for "Extended JSON". - -Changelog -========= - -:2022-10-05: Remove spec front matter and reformat changelog. -:2021-05-26: * Remove any mention of extra dollar-prefixed keys being prohibited - in a DBRef. MongoDB 5.0 and compatible drivers no longer enforce - such restrictions. - * Objects that resemble a DBRef without fully complying to its - structure should be left as-is during parsing. -:2020-09-01: Note that ``$``-prefixed keys not matching a known type MUST be - left as-is when parsing. This is patch-level change as this - behavior was already required in the BSON corpus tests ("Document - with keys that start with $"). 
-:2020-09-08: * Added support for parsing ``$uuid`` fields as BSON Binary subtype 4. - * Changed the example to using the MongoDB Python Driver. It - previously used the MongoDB Java Driver. The new example excludes - the following BSON types that are unsupported in Python - - ``Symbol``, ``SpecialFloat``, ``DBPointer`` and ``Undefined``. - Transformations for these types are now only documented in the - `Conversion table`_. -:2017-07-20: * Bumped specification to version 2.0. - * Added "Relaxed" format. - * Changed BSON timestamp type wrapper back to - ``{"t": *int*, "i": *int*}`` for backwards compatibility. (The - change in v1 to unsigned 64-bit string was premature optimization) - * Changed BSON regular expression type wrapper to - ``{"$regularExpression": {pattern: *string*, "options": *string*"}}``. - * Changed BSON binary type wrapper to ``{"$binary": {"base64": , "subType": }}`` - * Added "Restrictions and limitations" section. - * Clarified parser and generator rules. -:2017-02-01: Initial specification version 1.0. +.. note:: + This specification has been converted to Markdown and renamed to + `extended-json.md `_. diff --git a/source/gridfs/tests/README.md b/source/gridfs/tests/README.md index 6418f3ec31..8285b2e306 100644 --- a/source/gridfs/tests/README.md +++ b/source/gridfs/tests/README.md @@ -12,7 +12,7 @@ GridFS. These tests utilize the [Unified Test Format](../../unified-test-format/ The unified test format allows binary stream data to be expressed and matched with `$$hexBytes` (for uploads) and `$$matchesHexBytes` (for downloads), respectively; however, those operators are not supported in all contexts, such as `insertData` and `outcome`. 
When binary data must be expressed as a base64-encoded string -([Extended JSON](../../extended-json.rst) for a BSON binary type), the test SHOULD include a comment noting the +([Extended JSON](../../extended-json.md) for a BSON binary type), the test SHOULD include a comment noting the equivalent value in hexadecimal for human-readability. For example: ```yaml diff --git a/source/index.md b/source/index.md index 5a3fbddafb..08bad10509 100644 --- a/source/index.md +++ b/source/index.md @@ -1,8 +1,11 @@ # MongoDB Specifications +- [Authentication](auth/auth.md) - [BSON Binary Subtype 6](client-side-encryption/subtype6.md) - [BSON Corpus](bson-corpus/bson-corpus.md) - [BSON Decimal128 Type Handling in Drivers](bson-decimal128/decimal128.md) +- [Bulk Write](crud/bulk-write.md) +- [CRUD API](crud/crud.md) - [Causal Consistency Specification](causal-consistency/causal-consistency.md) - [Change Streams](change-streams/change-streams.md) - [Client Side Encryption](client-side-encryption/client-side-encryption.md) @@ -11,9 +14,8 @@ - [Command Logging and Monitoring](command-logging-and-monitoring/command-logging-and-monitoring.md) - [Connection Monitoring and Pooling](connection-monitoring-and-pooling/connection-monitoring-and-pooling.md) - [Connection String Spec](connection-string/connection-string-spec.md) -- [Driver Authentication](auth/auth.md) -- [Driver CRUD API](crud/crud.md) -- [Driver Transactions Specification](transactions/transactions.md) +- [Driver Mantras](./driver-mantras.md) +- [Extended JSON](./extended-json.md) - [FaaS Automated Testing](faas-automated-testing/faas-automated-testing.md) - [GridFS Spec](gridfs/gridfs-spec.md) - [Handling of DBRefs](./dbref.md) @@ -23,15 +25,21 @@ - [Logging](logging/logging.md) - [Max Staleness](max-staleness/max-staleness.md) - [Max Staleness Tests](max-staleness/max-staleness-tests.md) -- [MongoDB Driver Performance Benchmarking](benchmarking/benchmarking.md) - [OP_MSG](message/OP_MSG.md) +- [Performance 
Benchmarking](benchmarking/benchmarking.md) - [Retryable Reads](retryable-reads/retryable-reads.md) - [Retryable Writes](retryable-writes/retryable-writes.md) - [SDAM Logging and Monitoring Specification](server-discovery-and-monitoring/server-discovery-and-monitoring-logging-and-monitoring.md) +- [Server Discovery And Monitoring](server-discovery-and-monitoring/server-discovery-and-monitoring.md) - [Server Discovery And Monitoring -- Summary](server-discovery-and-monitoring/server-discovery-and-monitoring-summary.md) - [Server Discovery And Monitoring -- Test Plan](server-discovery-and-monitoring/server-discovery-and-monitoring-tests.md) - [Server Monitoring](server-discovery-and-monitoring/server-monitoring.md) - [Server Selection](server-selection/server-selection.md) - [Server Selection Test Plan](server-selection/server-selection-tests.md) +- [Server Wire version and Feature List](./wireversion-featurelist.md) +- [Sessions Specification](sessions/driver-sessions.md) +- [Snapshot Reads Specification](sessions/snapshot-sessions.md) +- [Transactions Specification](transactions/transactions.md) +- [URI Options Specification](uri-options/uri-options.md) - [Unified Test Format](unified-test-format/unified-test-format.md) - [Wire Compression in Drivers](compression/OP_COMPRESSED.md) diff --git a/source/initial-dns-seedlist-discovery/tests/README.md b/source/initial-dns-seedlist-discovery/tests/README.md index 5b86b22200..50a77cb3f8 100644 --- a/source/initial-dns-seedlist-discovery/tests/README.md +++ b/source/initial-dns-seedlist-discovery/tests/README.md @@ -92,7 +92,7 @@ These YAML and JSON files contain the following fields: - `hosts`: the discovered topology's list of hosts once SDAM completes a scan - `numHosts`: the expected number of hosts discovered once SDAM completes a scan. This is mainly used to test `srvMaxHosts`, since randomly selected hosts cannot be deterministically asserted. 
-- `options`: the parsed [URI options](../../uri-options/uri-options.rst) as discovered from the +- `options`: the parsed [URI options](../../uri-options/uri-options.md) as discovered from the [Connection String](../../connection-string/connection-string-spec.md)'s "Connection Options" component and SRV resolution (e.g. TXT records, implicit `tls` default). - `parsed_options`: additional, parsed options from other diff --git a/source/load-balancers/load-balancers.md b/source/load-balancers/load-balancers.md index f9198d18b2..75ba61d57d 100644 --- a/source/load-balancers/load-balancers.md +++ b/source/load-balancers/load-balancers.md @@ -21,7 +21,7 @@ The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SH #### SDAM An abbreviated form of "Server Discovery and Monitoring", specification defined in -[Server Discovery and Monitoring Specification](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst). +[Server Discovery and Monitoring Specification](../server-discovery-and-monitoring/server-discovery-and-monitoring.md). #### Service @@ -60,7 +60,7 @@ SRV record as is done for non-load balanced sharded clusters. ### Server Discovery Logging and Monitoring -
+ #### Monitoring diff --git a/source/logging/logging.md b/source/logging/logging.md index 63e229367f..f8fdee7148 100644 --- a/source/logging/logging.md +++ b/source/logging/logging.md @@ -134,7 +134,7 @@ produce. | Component Name | Specification(s) | Environment Variable | | --------------- | -------------------------------------------------------------------------------------------------------------- | ------------------------------ | | command | [Command Logging and Monitoring](../command-logging-and-monitoring/command-logging-and-monitoring.md) | `MONGODB_LOG_COMMAND` | -| topology | [Server Discovery and Monitoring](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst) | `MONGODB_LOG_TOPOLOGY` | +| topology | [Server Discovery and Monitoring](../server-discovery-and-monitoring/server-discovery-and-monitoring.md) | `MONGODB_LOG_TOPOLOGY` | | serverSelection | [Server Selection](../server-selection/server-selection.md) | `MONGODB_LOG_SERVER_SELECTION` | | connection | [Connection Monitoring and Pooling](../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md) | `MONGODB_LOG_CONNECTION` | diff --git a/source/ocsp-support/tests/README.rst b/source/ocsp-support/tests/README.rst index cbb8b6b9c4..263ec7a1f1 100644 --- a/source/ocsp-support/tests/README.rst +++ b/source/ocsp-support/tests/README.rst @@ -14,7 +14,7 @@ drivers can use to prove their conformance to the OCSP Support specification. These tests MUST BE implemented by all drivers. Additional YAML and JSON tests have also been added to the `URI -Options Tests <../../uri-options/tests/README.rst>`__. Specifically, +Options Tests <../../uri-options/tests/README.md>`__. Specifically, the `TLS Options Test <../../uri-options/tests/tls-options.yml>`__ has been updated with additional tests for the new URI options ``tlsDisableOCSPEndpointCheck`` and ``tlsDisableCertificateRevocationCheck``. 
diff --git a/source/polling-srv-records-for-mongos-discovery/polling-srv-records-for-mongos-discovery.rst b/source/polling-srv-records-for-mongos-discovery/polling-srv-records-for-mongos-discovery.rst index b255c9ad68..7691629d33 100644 --- a/source/polling-srv-records-for-mongos-discovery/polling-srv-records-for-mongos-discovery.rst +++ b/source/polling-srv-records-for-mongos-discovery/polling-srv-records-for-mongos-discovery.rst @@ -26,7 +26,7 @@ specification's definition of monitoring a set of mongos servers in a Sharded TopologyType. .. _`Initial DNS Seedlist Discovery`: ../initial-dns-seedlist-discovery/initial-dns-seedlist-discovery.md -.. _`Server Discovery and Monitoring`: ../server-discovery-and-monitoring/server-discovery-and-monitoring.rst +.. _`Server Discovery and Monitoring`: ../server-discovery-and-monitoring/server-discovery-and-monitoring.md META ==== @@ -144,7 +144,7 @@ Single-Threaded Drivers The rescan MUST happen **before** scanning all servers as part of the normal scanning_ functionality, but only if *rescanSRVIntervalMS* has passed. -.. _scanning: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#scanning +.. _scanning: ../server-discovery-and-monitoring/server-discovery-and-monitoring.md#scanning Test Plan ========= diff --git a/source/requirements.txt b/source/requirements.txt new file mode 100644 index 0000000000..b854bca214 --- /dev/null +++ b/source/requirements.txt @@ -0,0 +1 @@ +mkdocs \ No newline at end of file diff --git a/source/retryable-reads/retryable-reads.md b/source/retryable-reads/retryable-reads.md index 280b6f65a6..1ffd168a93 100644 --- a/source/retryable-reads/retryable-reads.md +++ b/source/retryable-reads/retryable-reads.md @@ -84,7 +84,7 @@ the defined name but MAY deviate to comply with their existing conventions. 
Drivers MUST verify server eligibility by ensuring that `maxWireVersion` is at least 6 because retryable reads require a MongoDB 3.6 standalone, replica set or shard cluster, MongoDB 3.6 server wire version is 6 as defined in the -[Server Wire version and Feature List specification](../wireversion-featurelist.rst). +[Server Wire version and Feature List specification](../wireversion-featurelist.md). The minimum server version is 3.6 because @@ -202,7 +202,7 @@ Drivers MUST only attempt to retry a read command if If the driver decides to allow retry and the previous attempt of a retryable read command encounters a retryable error, the driver MUST update its topology according to the Server Discovery and Monitoring spec (see -[SDAM: Error Handling](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#error-handling)) and +[SDAM: Error Handling](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#error-handling)) and capture this original retryable error. Drivers should then proceed with selecting a server for a retry attempt. ###### 3a. Selecting the server for retry @@ -247,7 +247,7 @@ and the timeout has not yet expired, then the Driver MUST jump back to step 2b a attempts. Otherwise, drivers MUST update their topology according to the SDAM spec (see -[SDAM: Error Handling](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#error-handling)). If an +[SDAM: Error Handling](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#error-handling)). If an error would not allow the caller to infer that an attempt was made (e.g. connection pool exception originating from the driver), the previous error should be raised. If a retry failed due to another retryable error or some other error originating from the server, that error should be raised instead as the caller can infer that an attempt was made and @@ -520,8 +520,8 @@ No. 
[This is in contrast to the answer supplied in in the retryable writes specification.](../retryable-writes/retryable-writes.md#can-drivers-resend-the-same-wire-protocol-message-on-retry-attempts) However, when retryable writes were implemented, no driver actually chose to resend the same wire protocol message. Today, if a driver attempted to resend the same wire protocol message, this could violate -[the rules for gossiping $clusterTime](../sessions/driver-sessions.rst#gossipping-the-cluster-time): specifically -[the rule that a driver must send the highest seen $clusterTime](../sessions/driver-sessions.rst#sending-the-highest-seen-cluster-time). +[the rules for gossiping $clusterTime](../sessions/driver-sessions.md#gossipping-the-cluster-time): specifically +[the rule that a driver must send the highest seen $clusterTime](../sessions/driver-sessions.md#sending-the-highest-seen-cluster-time). Additionally, there would be a behavioral difference between a driver resending the same wire protocol message and one that does not. For example, a driver that creates a new wire protocol message could exhibit the following diff --git a/source/retryable-writes/retryable-writes.md b/source/retryable-writes/retryable-writes.md index 974295302e..1adfe21e18 100644 --- a/source/retryable-writes/retryable-writes.md +++ b/source/retryable-writes/retryable-writes.md @@ -32,12 +32,12 @@ The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SH The transaction ID identifies the transaction as part of which the command is running. In a write command where the client has requested retryable behavior, it is expressed by the top-level `lsid` and `txnNumber` fields. The `lsid` component is the corresponding server session ID. which is a BSON value defined in the -[Driver Session](../sessions/driver-sessions.rst) specification. The `txnNumber` component is a monotonically increasing +[Driver Session](../sessions/driver-sessions.md) specification. 
The `txnNumber` component is a monotonically increasing (per server session), positive 64-bit integer. **ClientSession**\ Driver object representing a client session, which is defined in the -[Driver Session](../sessions/driver-sessions.rst) specification. This object is always associated with a server session; +[Driver Session](../sessions/driver-sessions.md) specification. This object is always associated with a server session; however, drivers will pool server sessions so that creating a ClientSession will not always entail creation of a new server session. The name of this object MAY vary across drivers. @@ -45,7 +45,7 @@ server session. The name of this object MAY vary across drivers. An error is considered retryable if it has a RetryableWriteError label in its top-level "errorLabels" field. See [Determining Retryable Errors](#determining-retryable-errors) for more information. -Additional terms may be defined in the [Driver Session](../sessions/driver-sessions.rst) specification. +Additional terms may be defined in the [Driver Session](../sessions/driver-sessions.md) specification. ### Naming Deviations @@ -109,10 +109,11 @@ Supported single-statement write operations include `insertOne()`, `updateOne()` `findOneAndDelete()`, `findOneAndReplace()`, and `findOneAndUpdate()`. Supported multi-statement write operations include `insertMany()` and `bulkWrite()`. The ordered option may be `true` or -`false`. In the case of `bulkWrite()`, `UpdateMany` or `DeleteMany` operations within the `requests` parameter may make -some write commands ineligible for retryability. Drivers MUST evaluate eligibility for each write command sent as part -of the `bulkWrite()` (after order and batch splitting) individually. Drivers MUST NOT alter existing logic for order and -batch splitting in an attempt to maximize retryability for operations within a bulk write. +`false`. 
For both the collection-level and client-level `bulkWrite()` methods, a bulk write batch is only retryable if +it does not contain any `multi: true` writes (i.e. `UpdateMany` and `DeleteMany`). Drivers MUST evaluate eligibility for +each write command sent as part of the `bulkWrite()` (after order and batch splitting) individually. Drivers MUST NOT +alter existing logic for order and batch splitting in an attempt to maximize retryability for operations within a bulk +write. These methods above are defined in the [CRUD](../crud/crud.md) specification. @@ -215,7 +216,7 @@ The RetryableWriteError label might be added to an error in a variety of ways: - the `writeConcernError.code` field in a mongos response The criteria for retryable errors is similar to the discussion in the SDAM spec's section on - [Error Handling](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#error-handling), but includes + [Error Handling](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#error-handling), but includes additional error codes. See [What do the additional error codes mean?](#what-do-the-additional-error-codes-mean) for the reasoning behind these additional errors. @@ -264,8 +265,8 @@ enabled. When constructing a supported write command that will be executed within a MongoClient where retryable writes have been enabled, drivers MUST increment the transaction number for the corresponding server session and include the server session ID and transaction number in top-level `lsid` and `txnNumber` fields, respectively. `lsid` is a BSON value -(discussed in the [Driver Session](../sessions/driver-sessions.rst) specification). `txnNumber` MUST be a positive -64-bit integer (BSON type 0x12). +(discussed in the [Driver Session](../sessions/driver-sessions.md) specification). `txnNumber` MUST be a positive 64-bit +integer (BSON type 0x12). 
The following example illustrates a possible write command for an `updateOne()` operation: @@ -299,8 +300,8 @@ MUST NOT attempt to retry a write command on any other error. If the first attempt of a write command including a transaction ID encounters a retryable error, the driver MUST update its topology according to the SDAM spec (see: -[Error Handling](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#error-handling)) and capture -this original retryable error. +[Error Handling](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#error-handling)) and capture this +original retryable error. Drivers MUST then retry the operation as many times as necessary until any one of the following conditions is reached: @@ -318,7 +319,7 @@ retrying is not possible and drivers MUST raise the retryable error from the pre is able to infer that an attempt was made. If a retry attempt also fails, drivers MUST update their topology according to the SDAM spec (see: -[Error Handling](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#error-handling)). If an error +[Error Handling](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#error-handling)). If an error would not allow the caller to infer that an attempt was made (e.g. connection pool exception originating from the driver) or the error is labeled "NoWritesPerformed", the error from the previous attempt should be raised. If all server errors are labeled "NoWritesPerformed", then the first error should be raised. @@ -448,12 +449,12 @@ function executeRetryableWrite(command, session) { ``` `handleError` in the above pseudocode refers to the function defined in the -[Error handling pseudocode](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#error-handling-pseudocode) +[Error handling pseudocode](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#error-handling-pseudocode) section of the SDAM specification. 
When retrying a write command, drivers MUST resend the command with the same transaction ID. Drivers MUST NOT resend the original wire protocol message if doing so would violate rules for -[gossipping the cluster time](../sessions/driver-sessions.rst#gossipping-the-cluster-time) (see: +[gossipping the cluster time](../sessions/driver-sessions.md#gossipping-the-cluster-time) (see: [Can drivers resend the same wire protocol message on retry attempts?](#can-drivers-resend-the-same-wire-protocol-message-on-retry-attempts)). In the case of a multi-statement write operation split across multiple write commands, a failed retry attempt will also @@ -512,7 +513,7 @@ driver API needs to be extended to support this behavior. ## Design Rationale -The design of this specification piggy-backs that of the [Driver Session](../sessions/driver-sessions.rst) specification +The design of this specification piggy-backs that of the [Driver Session](../sessions/driver-sessions.md) specification in that it modifies the driver API as little as possible to introduce the concept of at-most-once semantics and retryable behavior for write operations. A transaction ID will be included in all supported write commands executed within the scope of a MongoClient where retryable writes have been enabled. @@ -556,7 +557,7 @@ The spec concerns itself with retrying write operations that encounter a retryab network error or a response indicating that the node is no longer a primary). A retryable error may be classified as either a transient error (e.g. dropped connection, replica set failover) or persistent outage. In the case of a transient error, the driver will mark the server as "unknown" per the -[SDAM](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst) spec. A subsequent retry attempt will +[SDAM](../server-discovery-and-monitoring/server-discovery-and-monitoring.md) spec. 
A subsequent retry attempt will allow the driver to rediscover the primary within the designated server selection timeout period (30 seconds by default). If server selection times out during this retry attempt, we can reasonably assume that there is a persistent outage. In the case of a persistent outage, multiple retry attempts are fruitless and would waste time. See @@ -634,7 +635,7 @@ Since retry attempts entail sending the same command and transaction ID to the s the same wire protocol message in order to avoid constructing a new message and computing its checksum. The server will not complain if it receives two messages with the same `requestId`, as the field is only used for logging and populating the `responseTo` field in its replies to the client. That said, re-using a wire protocol message might violate rules for -[gossipping the cluster time](../sessions/driver-sessions.rst#gossipping-the-cluster-time) and might also have +[gossipping the cluster time](../sessions/driver-sessions.md#gossipping-the-cluster-time) and might also have implications for [Command Monitoring](#command-monitoring), since the original write command and its retry attempt may report the same `requestId`. @@ -673,6 +674,8 @@ retryWrites is not true would be inconsistent with the server and potentially co ## Changelog +- 2024-05-08: Add guidance for client-level `bulkWrite()` retryability. + - 2024-05-02: Migrated from reStructuredText to Markdown. - 2024-04-29: Fix the link to the Driver Sessions spec. 
diff --git a/source/retryable-writes/tests/README.md b/source/retryable-writes/tests/README.md index 151b26181f..e883ca368d 100644 --- a/source/retryable-writes/tests/README.md +++ b/source/retryable-writes/tests/README.md @@ -1,7 +1,5 @@ # Retryable Write Tests -______________________________________________________________________ - ## Introduction The YAML and JSON files in this directory are platform-independent tests meant to exercise a driver's implementation of @@ -71,7 +69,7 @@ Drivers should also assert that command documents are properly constructed with on whether the write operation is supported. [Command Logging and Monitoring](../../command-logging-and-monitoring/command-logging-and-monitoring.rst) may be used to check for the presence of a `txnNumber` field in the command document. Note that command documents may always include an -`lsid` field per the [Driver Session](../../sessions/driver-sessions.rst) specification. +`lsid` field per the [Driver Session](../../sessions/driver-sessions.md) specification. These tests may be run against both a replica set and shard cluster. @@ -106,17 +104,238 @@ Drivers should test that transactions IDs are always included in commands for su The following tests ensure that retryable writes work properly with replica sets and sharded clusters. -1. Test that retryable writes raise an exception when using the MMAPv1 storage engine. For this test, execute a write - operation, such as `insertOne`, which should generate an exception. Assert that the error message is the replacement - error message: +### 1. Test that retryable writes raise an exception when using the MMAPv1 storage engine. + +For this test, execute a write operation, such as `insertOne`, which should generate an exception. Assert that the error +message is the replacement error message: + +``` +This MongoDB deployment does not support retryable writes. Please add +retryWrites=false to your connection string. +``` + +and the error code is 20. 
+ +> [!NOTE] +> Drivers that rely on `serverStatus` to determine the storage engine in use MAY skip this test for sharded clusters, +> since `mongos` does not report this information in its `serverStatus` response. + +### 2. Test that drivers properly retry after encountering PoolClearedErrors. + +This test MUST be implemented by any driver that implements the CMAP specification. + +This test requires MongoDB 4.3.4+ for both the `errorLabels` and `blockConnection` fail point options. + +1. Create a client with maxPoolSize=1 and retryWrites=true. If testing against a sharded deployment, be sure to connect + to only a single mongos. + +2. Enable the following failpoint: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["insert"], + errorCode: 91, + blockConnection: true, + blockTimeMS: 1000, + errorLabels: ["RetryableWriteError"] + } + } + ``` + +3. Start two threads and attempt to perform an `insertOne` simultaneously on both. + +4. Verify that both `insertOne` attempts succeed. + +5. Via CMAP monitoring, assert that the first check out succeeds. + +6. Via CMAP monitoring, assert that a PoolClearedEvent is then emitted. + +7. Via CMAP monitoring, assert that the second check out then fails due to a connection error. + +8. Via Command Monitoring, assert that exactly three `insert` CommandStartedEvents were observed in total. +9. Disable the failpoint. + +### 3. Test that drivers return the original error after encountering a WriteConcernError with a RetryableWriteError label. + +This test MUST: + +- be implemented by any driver that implements the Command Monitoring specification, +- only run against replica sets as mongos does not propagate the NoWritesPerformed label to the drivers. +- be run against server versions 6.0 and above. + +Additionally, this test requires drivers to set a fail point after an `insertOne` operation but before the subsequent +retry. 
Drivers that are unable to set a failCommand after the CommandSucceededEvent SHOULD use mocking or write a unit +test to cover the same sequence of events. + +1. Create a client with `retryWrites=true`. + +2. Configure a fail point with error code `91` (ShutdownInProgress): + + ```javascript + { + configureFailPoint: "failCommand", + mode: {times: 1}, + data: { + failCommands: ["insert"], + errorLabels: ["RetryableWriteError"], + writeConcernError: { code: 91 } + } + } + ``` + +3. Via the command monitoring CommandSucceededEvent, configure a fail point with error code `10107` (NotWritablePrimary) + and a NoWritesPerformed label: + + ```javascript + { + configureFailPoint: "failCommand", + mode: {times: 1}, + data: { + failCommands: ["insert"], + errorCode: 10107, + errorLabels: ["RetryableWriteError", "NoWritesPerformed"] + } + } ``` - This MongoDB deployment does not support retryable writes. Please add - retryWrites=false to your connection string. + + Drivers SHOULD only configure the `10107` fail point command if the succeeded event is for the `91` error + configured in step 2. + +4. Attempt an `insertOne` operation on any record for any database and collection. For the resulting error, assert that + the associated error code is `91`. + +5. Disable the fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "off" + } ``` - and the error code is 20. +### 4. Test that in a sharded cluster writes are retried on a different mongos when one is available. + +This test MUST be executed against a sharded cluster that has at least two mongos instances, supports +`retryWrites=true`, has enabled the `configureFailPoint` command, and supports the `errorLabels` field (MongoDB 4.3.1+). + +> [!NOTE] +> This test cannot reliably distinguish "retry on a different mongos due to server deprioritization" (the behavior +> intended to be tested) from "retry on a different mongos due to normal SDAM randomized suitable server selection". 
+> Verify relevant code paths are correctly executed by the tests using external means such as a logging, debugger, code +> coverage tool, etc. + +1. Create two clients `s0` and `s1` that each connect to a single mongos from the sharded cluster. They must not connect + to the same mongos. + +2. Configure the following fail point for both `s0` and `s1`: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["insert"], + errorCode: 6, + errorLabels: ["RetryableWriteError"] + } + } + ``` + +3. Create a client `client` with `retryWrites=true` that connects to the cluster using the same two mongoses as `s0` and + `s1`. + +4. Enable failed command event monitoring for `client`. + +5. Execute an `insert` command with `client`. Assert that the command failed. + +6. Assert that two failed command events occurred. Assert that the failed command events occurred on different mongoses. + +7. Disable the fail points on both `s0` and `s1`. + +### 5. Test that in a sharded cluster writes are retried on the same mongos when no others are available. + +This test MUST be executed against a sharded cluster that supports `retryWrites=true`, has enabled the +`configureFailPoint` command, and supports the `errorLabels` field (MongoDB 4.3.1+). + +Note: this test cannot reliably distinguish "retry on a different mongos due to server deprioritization" (the behavior +intended to be tested) from "retry on a different mongos due to normal SDAM behavior of randomized suitable server +selection". Verify relevant code paths are correctly executed by the tests using external means such as a logging, +debugger, code coverage tool, etc. + +1. Create a client `s0` that connects to a single mongos from the cluster. + +2. 
Configure the following fail point for `s0`: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["insert"], + errorCode: 6, + errorLabels: ["RetryableWriteError"], + closeConnection: true + } + } + ``` + +3. Create a client `client` with `directConnection=false` (when not set by default) and `retryWrites=true` that connects + to the cluster using the same single mongos as `s0`. + +4. Enable succeeded and failed command event monitoring for `client`. + +5. Execute an `insert` command with `client`. Assert that the command succeeded. + +6. Assert that exactly one failed command event and one succeeded command event occurred. Assert that both events + occurred on the same mongos. + +7. Disable the fail point on `s0`. + +## Changelog + +- 2024-05-30: Migrated from reStructuredText to Markdown. + +- 2024-02-27: Convert legacy retryable writes tests to unified format. + +- 2024-02-21: Update prose test 4 and 5 to workaround SDAM behavior preventing\ + execution of deprioritization code + paths. + +- 2024-01-05: Fix typo in prose test title. + +- 2024-01-03: Note server version requirements for fail point options and revise\ + tests to specify the `errorLabels` + option at the top-level instead of within `writeConcernError`. + +- 2023-08-26: Add prose tests for retrying in a sharded cluster. + +- 2022-08-30: Add prose test verifying correct error handling for errors with\ + the NoWritesPerformed label, which is to + return the original error. + +- 2022-04-22: Clarifications to `serverless` and `useMultipleMongoses`. + +- 2021-08-27: Add `serverless` to `runOn`. Clarify behavior of\ + `useMultipleMongoses` for `LoadBalanced` topologies. + +- 2021-04-23: Add `load-balanced` to test topology requirements. + +- 2021-03-24: Add prose test verifying `PoolClearedErrors` are retried. 
+ +- 2019-10-21: Add `errorLabelsContain` and `errorLabelsOmit` fields to\ + `result` + +- 2019-08-07: Add Prose Tests section + +- 2019-06-07: Mention $merge stage for aggregate alongside $out + +- 2019-03-01: Add top-level `runOn` field to denote server version and/or\ + topology requirements for the + test file. Removes the `minServerVersion` and `maxServerVersion` top-level fields, which are now expressed within + `runOn` elements. - [!NOTE] - storage engine in use MAY skip this test for sharded clusters, since `mongos` does not report this information in its - `serverStatus` response. + Add test-level `useMultipleMongoses` field. diff --git a/source/retryable-writes/tests/etc/templates/handshakeError.yml.template b/source/retryable-writes/tests/etc/templates/handshakeError.yml.template index 3974392a6f..d9037d5b20 100644 --- a/source/retryable-writes/tests/etc/templates/handshakeError.yml.template +++ b/source/retryable-writes/tests/etc/templates/handshakeError.yml.template @@ -51,6 +51,10 @@ tests: # - Tests whether operation successfully retries the handshake and succeeds. 
{% for operation in operations %} - description: "{{operation.object}}.{{operation.operation_name}} succeeds after retryable handshake network error" + {%- if (operation.operation_name == 'clientBulkWrite') %} + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0 + {%- endif %} operations: - name: failPoint object: testRunner @@ -95,6 +99,10 @@ tests: commandName: {{operation.command_name}} - description: "{{operation.object}}.{{operation.operation_name}} succeeds after retryable handshake server error (ShutdownInProgress)" + {%- if (operation.operation_name == 'clientBulkWrite') %} + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0 + {%- endif %} operations: - name: failPoint object: testRunner diff --git a/source/retryable-writes/tests/unified/client-bulkWrite-clientErrors.json b/source/retryable-writes/tests/unified/client-bulkWrite-clientErrors.json new file mode 100644 index 0000000000..e2c0fb9c0a --- /dev/null +++ b/source/retryable-writes/tests/unified/client-bulkWrite-clientErrors.json @@ -0,0 +1,350 @@ +{ + "description": "client bulkWrite retryable writes with client errors", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + 
"tests": [ + { + "description": "client bulkWrite with one network error succeeds after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + 
"description": "client bulkWrite with two network errors fails after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "isClientError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/source/retryable-writes/tests/unified/client-bulkWrite-clientErrors.yml b/source/retryable-writes/tests/unified/client-bulkWrite-clientErrors.yml new file mode 100644 index 
0000000000..85696e89db --- /dev/null +++ b/source/retryable-writes/tests/unified/client-bulkWrite-clientErrors.yml @@ -0,0 +1,172 @@ +description: "client bulkWrite retryable writes with client errors" +schemaVersion: "1.21" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name retryable-writes-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "retryable-writes-tests.coll0" + +tests: + - description: "client bulkWrite with one network error succeeds after retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + closeConnection: true + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 4 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + - commandStartedEvent: + commandName: 
bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + - description: "client bulkWrite with two network errors fails after retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 2 + data: + failCommands: [ bulkWrite ] + closeConnection: true + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + verboseResults: true + expectError: + isClientError: true + errorLabelsContain: ["RetryableWriteError"] # Error label added by driver. 
+ expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } diff --git a/source/retryable-writes/tests/unified/client-bulkWrite-serverErrors.json b/source/retryable-writes/tests/unified/client-bulkWrite-serverErrors.json new file mode 100644 index 0000000000..4a0b210eb5 --- /dev/null +++ b/source/retryable-writes/tests/unified/client-bulkWrite-serverErrors.json @@ -0,0 +1,872 @@ +{ + "description": "client bulkWrite retryable writes", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "clientRetryWritesFalse", + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + 
"initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + 
"databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 222 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { 
+ "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + 
"deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": 
true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with retryWrites: false does not retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "clientRetryWritesFalse", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + 
] + } + } + } + }, + { + "object": "clientRetryWritesFalse", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "clientRetryWritesFalse", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/source/retryable-writes/tests/unified/client-bulkWrite-serverErrors.yml b/source/retryable-writes/tests/unified/client-bulkWrite-serverErrors.yml new file mode 100644 index 0000000000..23d2c622ee --- /dev/null +++ b/source/retryable-writes/tests/unified/client-bulkWrite-serverErrors.yml @@ -0,0 +1,412 @@ +description: "client bulkWrite retryable writes" +schemaVersion: "1.21" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - client: + id: &clientRetryWritesFalse clientRetryWritesFalse + uriOptions: + retryWrites: false + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name retryable-writes-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace 
"retryable-writes-tests.coll0" + +tests: + - description: "client bulkWrite with no multi: true operations succeeds after retryable top-level error" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - replaceOne: + namespace: *namespace + filter: { _id: 2 } + replacement: { x: 222 } + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 1 + insertResults: + 0: + insertedId: 4 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 3: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + 
updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 222 } + - { _id: 4, x: 44 } + - description: "client bulkWrite with multi: true operations fails after retryable top-level error" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - deleteMany: + namespace: *namespace + filter: { _id: 3 } + expectError: + errorCode: 189 + errorLabelsContain: [ RetryableWriteError ] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: true + - delete: 0 + filter: { _id: 3 } + multi: true + nsInfo: + - ns: *namespace + - description: "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorLabels: [ RetryableWriteError ] + writeConcernError: + code: 91 + errmsg: "Replication is being shut down" + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - replaceOne: 
+ namespace: *namespace + filter: { _id: 2 } + replacement: { x: 222 } + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 1 + insertResults: + 0: + insertedId: 4 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 3: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - description: "client bulkWrite with multi: true operations fails after retryable writeConcernError" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorLabels: [ RetryableWriteError ] + writeConcernError: + code: 91 + errmsg: "Replication is being shut down" + - object: *client0 + name: clientBulkWrite + arguments: + models: + - 
updateMany: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - deleteMany: + namespace: *namespace + filter: { _id: 3 } + expectError: + writeConcernErrors: + - code: 91 + message: "Replication is being shut down" + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: true + - delete: 0 + filter: { _id: 3 } + multi: true + nsInfo: + - ns: *namespace + - description: "client bulkWrite with retryWrites: false does not retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *clientRetryWritesFalse + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *clientRetryWritesFalse + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + expectError: + errorCode: 189 + errorLabelsContain: [ RetryableWriteError ] + expectEvents: + - client: *clientRetryWritesFalse + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace diff --git a/source/retryable-writes/tests/unified/handshakeError.json b/source/retryable-writes/tests/unified/handshakeError.json index df37bd7232..3c46463759 100644 --- a/source/retryable-writes/tests/unified/handshakeError.json +++ b/source/retryable-writes/tests/unified/handshakeError.json @@ -53,6 +53,222 @@ } ], "tests": [ + { + "description": "client.clientBulkWrite succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-handshake-tests.coll", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite" + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite" + } + } + ] + } + ] + }, + { + "description": "client.clientBulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": 
"clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-handshake-tests.coll", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite" + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite" + } + } + ] + } + ] + }, { "description": "collection.insertOne succeeds after retryable handshake network error", "operations": [ diff --git a/source/retryable-writes/tests/unified/handshakeError.yml b/source/retryable-writes/tests/unified/handshakeError.yml index 9b2774bc77..131bbf2e5c 100644 --- a/source/retryable-writes/tests/unified/handshakeError.yml +++ b/source/retryable-writes/tests/unified/handshakeError.yml @@ -50,6 +50,96 @@ tests: # - Triggers failpoint (second time). # - Tests whether operation successfully retries the handshake and succeeds. 
+ - description: "client.clientBulkWrite succeeds after retryable handshake network error" + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0 + operations: + - name: failPoint + object: testRunner + arguments: + client: *client + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: [ping, saslContinue] + closeConnection: true + - name: runCommand + object: *database + arguments: { commandName: ping, command: { ping: 1 } } + expectError: { isError: true } + - name: clientBulkWrite + object: *client + arguments: + models: + - insertOne: + namespace: retryable-writes-handshake-tests.coll + document: { _id: 8, x: 88 } + expectEvents: + - client: *client + eventType: cmap + events: + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - client: *client + events: + - commandStartedEvent: + command: { ping: 1 } + databaseName: *databaseName + - commandFailedEvent: + commandName: ping + - commandStartedEvent: + commandName: bulkWrite + - commandSucceededEvent: + commandName: bulkWrite + + - description: "client.clientBulkWrite succeeds after retryable handshake server error (ShutdownInProgress)" + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0 + operations: + - name: failPoint + object: testRunner + arguments: + client: *client + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: [ping, saslContinue] + errorCode: 91 + - name: runCommand + object: *database + arguments: { commandName: ping, command: { ping: 1 } } + expectError: { isError: true } + - name: clientBulkWrite + object: *client + arguments: + models: + - insertOne: + namespace: retryable-writes-handshake-tests.coll + document: { _id: 8, x: 88 } + expectEvents: + - client: *client + eventType: cmap + events: + - { connectionCheckOutStartedEvent: {}
} + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - client: *client + events: + - commandStartedEvent: + command: { ping: 1 } + databaseName: *databaseName + - commandFailedEvent: + commandName: ping + - commandStartedEvent: + commandName: bulkWrite + - commandSucceededEvent: + commandName: bulkWrite + - description: "collection.insertOne succeeds after retryable handshake network error" operations: - name: failPoint diff --git a/source/run-command/run-command.rst b/source/run-command/run-command.rst index a51f2b0261..5e7e667966 100644 --- a/source/run-command/run-command.rst +++ b/source/run-command/run-command.rst @@ -76,7 +76,7 @@ The following represents how a runCommand API SHOULD be exposed. * An optional explicit client session. * The associated logical session id (`lsid`) the driver MUST apply to the command. * - * @see https://github.com/mongodb/specifications/blob/master/source/sessions/driver-sessions.rst#clientsession + * @see ../sessions/driver-sessions.md#clientsession */ session?: ClientSession; @@ -129,11 +129,11 @@ Drivers MUST NOT attempt to check the command document for the presence of an `` Every ClientSession has a corresponding logical session ID representing the server-side session ID. The logical session ID MUST be included under ``lsid`` in the command sent to the server without modifying user input. -* See Driver Sessions' section on `Sending the session ID to the server on all commands `_ +* See Driver Sessions' section on `Sending the session ID to the server on all commands <../sessions/driver-sessions.md#sending-the-session-id-to-the-server-on-all-commands>`_ The command sent to the server MUST gossip the ``$clusterTime`` if cluster time support is detected. 
-* See Driver Sessions' section on `Gossipping the cluster time `_ +* See Driver Sessions' section on `Gossipping the cluster time <../sessions/driver-sessions.md#gossipping-the-cluster-time>`_ Transactions """""""""""" @@ -274,7 +274,7 @@ All ``getMore`` commands constructed for this cursor MUST send the same ``lsid`` A cursor is considered exhausted or closed when the server reports its ``id`` as zero. When the cursor is exhausted the client session MUST be ended and the server session returned to the pool as early as possible rather than waiting for a caller to completely iterate the final batch. -* See Drivers Sessions' section on `Sessions and Cursors `_ +* See Drivers Sessions' section on `Sessions and Cursors <../sessions/driver-sessions.md#sessions-and-cursors>`_ Server Selection """""""""""""""" @@ -320,7 +320,7 @@ Drivers MUST provide an explicit mechanism for releasing the cursor resources, t If the cursor id is nonzero a KillCursors operation MUST be attempted, the result of the operation SHOULD be ignored. The ClientSession associated with the cursor MUST be ended and the ServerSession returned to the pool. 
-* See Driver Sessions' section on `When sending a killCursors command `_ +* See Driver Sessions' section on `When sending a killCursors command <../sessions/driver-sessions.md#when-sending-a-killcursors-command>`_ * See Find, getMore and killCursors commands' section on `killCursors `_ Client Side Operations Timeout diff --git a/source/server-discovery-and-monitoring/server-discovery-and-monitoring-logging-and-monitoring.md b/source/server-discovery-and-monitoring/server-discovery-and-monitoring-logging-and-monitoring.md index a7589a6472..5437f0eb75 100644 --- a/source/server-discovery-and-monitoring/server-discovery-and-monitoring-logging-and-monitoring.md +++ b/source/server-discovery-and-monitoring/server-discovery-and-monitoring-logging-and-monitoring.md @@ -33,8 +33,7 @@ The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SH `Server` > The term `Server` refers to the implementation in the driver's language of an abstraction of a mongod or mongos -> process, or a load balancer, as defined by the -> [SDAM specification](https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#server). +> process, or a load balancer, as defined by the [SDAM specification](server-discovery-and-monitoring.md#server). ### Specification @@ -369,59 +368,14 @@ The following table describes the rules for determining if a topology type has r preference is passed to `hasReadableServer`, the driver MUST default the value to the default read preference, `primary`, or treat the call as if `primary` was provided. - ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Topology TypehasReadableServerhasWritableServer
Unknownfalsefalse
Singletrue if the server is availabletrue if the server is available
ReplicaSetNoPrimary
Called with primary: -false
-Called with any other option: uses the read preference to determine if -any server in the cluster is suitable for reading.
-Called with no option: false
false
ReplicaSetWithPrimary
Called with any valid option: uses the read -preference to determine if any server in the cluster is suitable for -reading.
-Called with no option: true
true
Shardedtrue if 1+ servers are availabletrue if 1+ servers are available
LoadBalancedtruetrue
+| Topology Type | `hasReadableServer` | `hasWritableServer` | +| --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------- | +| Unknown | `false` | `false` | +| Single | `true` if the server is available | `true` if the server is available | +| ReplicaSetNoPrimary | Called with `primary`: `false`
Called with any other option: uses the read preference to determine if any server in the cluster is suitable for reading.
Called with no option: `false` | `false` | +| ReplicaSetWithPrimary | Called with any valid option: uses the read preference to determine if any server in the cluster is suitable for reading.
Called with no option: `true` | `true` | +| Sharded | `true` if 1+ servers are available | `true` if 1+ servers are available | +| LoadBalanced | `true` | `true` | ### Log Messages @@ -617,13 +571,9 @@ See the [README](tests/monitoring/README.md). - 2021-05-06: Updated to use modern terminology. -# \<\<\<\<\<\<\< HEAD :2024-03-29: Updated to clarify expected initial value of TopologyDescriptionChangedEvent's previousDescription field :2024-01-17: Updated to require that `TopologyDescriptionChangedEvent` should be emitted before just `TopologyClosedEvent` is emitted :2024-01-04: Updated to clarify when ServerHeartbeatStartedEvent should be emitted :2023-03-31: Renamed to include "logging" in the title. Reorganized contents and made consistent with CLAM spec, and added requirements for SDAM log messages. :2022-10-05: Remove spec front matter and reformat changelog. :2021-05-06: Updated to use modern terminology. :2020-04-20: Add rules for streaming heartbeat protocol and add "awaited" field to heartbeat events. :2018:12-12: Clarified table of rules for readable/writable servers :2016-08-31: Added table of rules for determining if topology has readable/writable servers. :2016-10-11: TopologyDescription objects MAY have additional methods and properties. ||||||| parent of 469393fd (DRIVERS-2789 Convert SDAM Spec to Markdown) :2024-03-29: Updated to clarify expected initial value of TopologyDescriptionChangedEvent's previousDescription field :2024-01-04: Updated to clarify when ServerHeartbeatStartedEvent should be emitted :2023-03-31: Renamed to include "logging" in the title. Reorganized contents and made consistent with CLAM spec, and added requirements for SDAM log messages. :2022-10-05: Remove spec front matter and reformat changelog. :2021-05-06: Updated to use modern terminology. :2020-04-20: Add rules for streaming heartbeat protocol and add "awaited" field to heartbeat events. 
:2018:12-12: Clarified table of rules for readable/writable servers :2016-08-31: Added table of rules for determining if topology has readable/writable servers. :2016-10-11: TopologyDescription objects MAY have additional methods and properties. - - 2020-04-20: Add rules for streaming heartbeat protocol and add "awaited" field to heartbeat events. -> > > > > > > 469393fd (DRIVERS-2789 Convert SDAM Spec to Markdown) - -- 2018:12-12: Clarified table of rules for readable/writable servers +- 2018-12-12: Clarified table of rules for readable/writable servers - 2016-08-31: Added table of rules for determining if topology has readable/writable servers. diff --git a/source/server-discovery-and-monitoring/server-discovery-and-monitoring.md b/source/server-discovery-and-monitoring/server-discovery-and-monitoring.md new file mode 100644 index 0000000000..498fde0eaf --- /dev/null +++ b/source/server-discovery-and-monitoring/server-discovery-and-monitoring.md @@ -0,0 +1,1990 @@ +# Server Discovery And Monitoring + +- Status: Accepted +- Minimum Server Version: 2.4 + +______________________________________________________________________ + +## Abstract + +This spec defines how a MongoDB client discovers and monitors one or more servers. It covers monitoring a single server, +a set of mongoses, or a replica set. How does the client determine what type of servers they are? How does it keep this +information up to date? How does the client find an entire replica set from a seed list, and how does it respond to a +stepdown, election, reconfiguration, or network error? + +All drivers must answer these questions the same. Or, where platforms' limitations require differences among drivers, +there must be as few answers as possible and each must be clearly explained in this spec. Even in cases where several +answers seem equally good, drivers must agree on one way to do it. + +MongoDB users and driver authors benefit from having one way to discover and monitor servers. 
Users can substantially +understand their driver's behavior without inspecting its code or asking its author. Driver authors can avoid subtle +mistakes when they take advantage of a design that has been well-considered, reviewed, and tested. + +The server discovery and monitoring method is specified in four sections. First, a client is +[configured](#configuration). Second, it begins [monitoring](#monitoring) by calling +[hello or legacy hello](../mongodb-handshake/handshake.rst#terms) on all servers. (Multi-threaded and asynchronous +monitoring is described first, then single-threaded monitoring.) Third, as hello or legacy hello responses are received +the client [parses them](#parsing-a-hello-or-legacy-hello-response), and fourth, it \[updates its view of the +topology\](#updates its view of the topology). + +Finally, this spec describes how \[drivers update their topology view in response to errors\](#drivers update their +topology view in response to errors), and includes generous implementation notes for driver authors. + +This spec does not describe how a client chooses a server for an operation; that is the domain of the Server Selection +Spec. But there is a section describing the \[interaction between monitoring and server selection\](#interaction between +monitoring and server selection). + +There is no discussion of driver architecture and data structures, nor is there any specification of a user-facing API. +This spec is only concerned with the algorithm for monitoring the server topology. + +## Meta + +The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and +"OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt). + +## Specification + +### General Requirements + +**Direct connections:** A client MUST be able to connect to a single server of any type. 
This includes querying hidden +replica set members, and connecting to uninitialized members (see [RSGhost](#rsghost-and-rsother)) in order to run +"replSetInitiate". Setting a read preference MUST NOT be necessary to connect to a secondary. Of course, the secondary +will reject all operations done with the PRIMARY read preference because the secondaryOk bit is not set, but the initial +connection itself succeeds. Drivers MAY allow direct connections to arbiters (for example, to run administrative +commands). + +**Replica sets:** A client MUST be able to discover an entire replica set from a seed list containing one or more +replica set members. It MUST be able to continue monitoring the replica set even when some members go down, or when +reconfigs add and remove members. A client MUST be able to connect to a replica set while there is no primary, or the +primary is down. + +**Mongos:** A client MUST be able to connect to a set of mongoses and monitor their availability and +[round trip time](#round-trip-time). This spec defines how mongoses are discovered and monitored, but does not define +which mongos is selected for a given operation. + +### Terms + +#### Server + +A mongod or mongos process, or a load balancer. + +#### Deployment + +One or more servers: either a standalone, a replica set, or one or more mongoses. + +#### Topology + +The state of the deployment: its type (standalone, replica set, or sharded), which servers are up, what type of servers +they are, which is primary, and so on. + +#### Client + +Driver code responsible for connecting to MongoDB. + +#### Seed list + +Server addresses provided to the client in its initial configuration, for example from the +[connection string](https://www.mongodb.com/docs/manual/reference/connection-string/). 
+ +#### Data-Bearing Server Type + +A server type from which a client can receive application data: + +- Mongos +- RSPrimary +- RSSecondary +- Standalone +- LoadBalanced + +#### Round trip time + +Also known as RTT. + +The client's measurement of the duration of one hello or legacy hello call. The round trip time is used to support the +"localThresholdMS"[^1] option in the Server Selection Spec. + +#### hello or legacy hello outcome + +The result of an attempt to call the hello or legacy hello command on a server. It consists of three elements: a boolean +indicating the success or failure of the attempt, a document containing the command response (or null if it failed), and +the round trip time to execute the command (or null if it failed). + +#### check + +The client checks a server by attempting to call hello or legacy hello on it, and recording the outcome. + +#### scan + +The process of checking all servers in the deployment. + +#### suitable + +A server is judged "suitable" for an operation if the client can use it for a particular operation. For example, a write +requires a standalone, primary, or mongos. Suitability is fully specified in the +[Server Selection Spec](../server-selection/server-selection.md). + +#### address + +The hostname or IP address, and port number, of a MongoDB server. + +#### network error + +An error that occurs while reading from or writing to a network socket. + +#### network timeout + +A timeout that occurs while reading from or writing to a network socket. + +#### minHeartbeatFrequencyMS + +Defined in the [Server Monitoring spec](server-monitoring.rst). This value MUST be 500 ms, and it MUST NOT be +configurable. + +#### pool generation number + +The pool's generation number which starts at 0 and is incremented each time the pool is cleared. Defined in the +[Connection Monitoring and Pooling spec](../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md). 
+ +#### connection generation number + +The pool's generation number at the time this connection was created. Defined in the +[Connection Monitoring and Pooling spec](../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md). + +#### error generation number + +The error's generation number is the generation of the connection on which the application error occurred. Note that +when a network error occurs before the handshake completes then the error's generation number is the generation of the +pool at the time the connection attempt was started. + +#### State Change Error + +A server reply document indicating a "not writable primary" or "node is recovering" error. Starting in MongoDB 4.4 these +errors may also include a [topologyVersion](#topologyversion) field. + +### Data structures + +This spec uses a few data structures to describe the client's view of the topology. It must be emphasized that a driver +is free to implement the same behavior using different data structures. This spec uses these enums and structs in order +to describe driver **behavior**, not to mandate how a driver represents the topology, nor to mandate an API. + +#### Constants + +##### clientMinWireVersion and clientMaxWireVersion + +Integers. The wire protocol range supported by the client. + +#### Enums + +##### TopologyType + +Single, ReplicaSetNoPrimary, ReplicaSetWithPrimary, Sharded, LoadBalanced, or Unknown. + +See [updating the TopologyDescription](#updating-the-topologydescription). + +##### ServerType + +Standalone, Mongos, PossiblePrimary, RSPrimary, RSSecondary, RSArbiter, RSOther, RSGhost, LoadBalancer or Unknown. + +See [parsing a hello or legacy hello response](#parsing-a-hello-or-legacy-hello-response). + +> [!NOTE] +> Single-threaded clients use the PossiblePrimary type to maintain proper +> [scanning order](server-monitoring.rst#scanning-order). Multi-threaded and asynchronous clients do not need this +> ServerType; it is synonymous with Unknown. 
+ +#### TopologyDescription + +The client's representation of everything it knows about the deployment's topology. + +Fields: + +- type: a [TopologyType](#topologytype) enum value. See [initial TopologyType](#initial-topologytype). +- setName: the replica set name. Default null. +- maxElectionId: an ObjectId or null. The largest electionId ever reported by a primary. Default null. Part of the + (`electionId`, `setVersion`) tuple. +- maxSetVersion: an integer or null. The largest setVersion ever reported by a primary. It may not monotonically + increase, as electionId takes precedence in ordering. Default null. Part of the (`electionId`, `setVersion`) tuple. +- servers: a set of ServerDescription instances. Default contains one server: "localhost:27017", ServerType Unknown. +- stale: a boolean for single-threaded clients, whether the topology must be re-scanned. (Not related to + maxStalenessSeconds, nor to [stale primaries](#using-electionid-and-setversion-to-detect-stale-primaries).) +- compatible: a boolean. False if any server's wire protocol version range is incompatible with the client's. Default + true. +- compatibilityError: a string. The error message if "compatible" is false, otherwise null. +- logicalSessionTimeoutMinutes: integer or null. Default null. See [logical session timeout](#logical-session-timeout). + +#### ServerDescription + +The client's view of a single server, based on the most recent hello or legacy hello outcome. + +Again, drivers may store this information however they choose; this data structure is defined here merely to describe +the monitoring algorithm. + +Fields: + +- address: the hostname or IP, and the port number, that the client connects to. Note that this is **not** the "me" + field in the server's hello or legacy hello response, in the case that the server reports an address different from + the address the client uses. +- (=) error: information about the last error related to this server. Default null.
+- roundTripTime: the duration of the hello or legacy hello call. Default null. +- minRoundTripTime: the minimum RTT for the server. Default null. +- lastWriteDate: a 64-bit BSON datetime or null. The "lastWriteDate" from the server's most recent hello or legacy hello + response. +- opTime: an opTime or null. An opaque value representing the position in the oplog of the most recently seen write. + Default null. (Only mongos and shard servers record this field when monitoring config servers as replica sets, at + least until + [drivers allow applications to use readConcern "afterOptime".](../max-staleness/max-staleness.md#future-feature-to-support-readconcern-afteroptime)) +- (=) type: a [ServerType](#servertype) enum value. Default Unknown. +- (=) minWireVersion, maxWireVersion: the wire protocol version range supported by the server. Both default to 0. \[Use + min and maxWireVersion only to determine compatibility\](#use min and maxWireVersion only to determine compatibility). +- (=) me: The hostname or IP, and the port number, that this server was configured with in the replica set. Default + null. +- (=) hosts, passives, arbiters: Sets of addresses. This server's opinion of the replica set's members, if any. These + [hostnames are normalized to lower-case](#hostnames-are-normalized-to-lower-case). Default empty. The client + \[monitors all three types of servers\](#monitors all three types of servers) in a replica set. +- (=) tags: map from string to string. Default empty. +- (=) setName: string or null. Default null. +- (=) electionId: an ObjectId, if this is a MongoDB 2.6+ replica set member that believes it is primary. See + [using electionId and setVersion to detect stale primaries](#using-electionid-and-setversion-to-detect-stale-primaries). + Default null. +- (=) setVersion: integer or null. Default null. +- (=) primary: an address. This server's opinion of who the primary is. Default null. +- lastUpdateTime: when this server was last checked. 
Default "infinity ago". +- (=) logicalSessionTimeoutMinutes: integer or null. Default null. +- (=) topologyVersion: A topologyVersion or null. Default null. The "topologyVersion" from the server's most recent + hello or legacy hello response or [State Change Error](#state-change-error). +- (=) iscryptd: boolean indicating if the server is a + [mongocryptd](../client-side-encryption/client-side-encryption.md#mongocryptd) server. Default null. + +"Passives" are priority-zero replica set members that cannot become primary. The client treats them precisely the same +as other members. + +Fields marked (=) are used for [Server Description Equality](#server-description-equality) comparison. + +### Configuration + +#### No breaking changes + +This spec does not intend to require any drivers to make breaking changes regarding what configuration options are +available, how options are named, or what combinations of options are allowed. + +#### Initial TopologyDescription + +The default values for [TopologyDescription](#topologydescription) fields are described above. Users may override the +defaults as follows: + +##### Initial Servers + +The user MUST be able to set the initial servers list to a [seed list](#seed-list) of one or more addresses. + +The hostname portion of each address MUST be normalized to lower-case. 
+ +##### Initial TopologyType + +If the `directConnection` URI option is specified when a MongoClient is constructed, the TopologyType must be +initialized based on the value of the `directConnection` option and the presence of the `replicaSet` option according to +the following table: + +| directConnection | replicaSet present | Initial TopologyType | +| ---------------- | ------------------ | -------------------- | +| true | no | Single | +| true | yes | Single | +| false | no | Unknown | +| false | yes | ReplicaSetNoPrimary | + +If the `directConnection` option is not specified, newly developed drivers MUST behave as if it was specified with the +false value. + +Since changing the starting topology can reasonably be considered a backwards-breaking change, existing drivers SHOULD +stage implementation according to semantic versioning guidelines. Specifically, support for the `directConnection` URI +option can be added in a minor release. In a subsequent major release, the default starting topology can be changed to +Unknown. Drivers MUST document this in a prior minor release. + +Existing drivers MUST deprecate other URI options, if any, for controlling topology discovery or specifying the +deployment topology. If such a legacy option is specified and the `directConnection` option is also specified, and the +values of the two options are semantically different, the driver MUST report an error during URI option parsing. + +The API for initializing TopologyType using language-specific native options is not specified here. Drivers might +already have a convention, e.g. a single seed means Single, a setName means ReplicaSetNoPrimary, and a list of seeds +means Unknown. There are variations, however: In the Java driver a single seed means Single, but a **list** containing +one seed means Unknown, so it can transition to replica-set monitoring if the seed is discovered to be a replica set +member. 
In contrast, PyMongo requires a non-null setName in order to begin replica-set monitoring, regardless of the +number of seeds. This spec does not cover language-specific native options that a driver may provide. + +##### Initial setName + +It is allowed to use `directConnection=true` in conjunction with the `replicaSet` URI option. The driver must connect in +Single topology and verify that setName matches the specified name, as per +[verifying setName with TopologyType Single](#verifying-setname-with-topologytype-single). + +When a MongoClient is initialized using language-specific native options, the user MUST be able to set the client's +initial replica set name. A driver MAY require the set name in order to connect to a replica set, or it MAY be able to +discover the replica set name as it connects. + +##### Allowed configuration combinations + +Drivers MUST enforce: + +- TopologyType Single cannot be used with multiple seeds. +- `directConnection=true` cannot be used with multiple seeds. +- If setName is not null, only TopologyType ReplicaSetNoPrimary, and possibly Single, are allowed. (See + [verifying setName with TopologyType Single](#verifying-setname-with-topologytype-single).) +- `loadBalanced=true` cannot be used in conjunction with `directConnection=true` or `replicaSet` + +##### Handling of SRV URIs resolving to single host + +When a driver is given an SRV URI, if the `directConnection` URI option is not specified, and the `replicaSet` URI +option is not specified, the driver MUST start in Unknown topology, and follow the rules in the +[TopologyType table](#topologytype-table) for transitioning to other topologies. In particular, the driver MUST NOT use +the number of hosts from the initial SRV lookup to decide what topology to start in. + +#### heartbeatFrequencyMS + +The interval between server [checks](#check), counted from the end of the previous check until the beginning of the next +one. 
+ +For multi-threaded and asynchronous drivers it MUST default to 10 seconds and MUST be configurable. For single-threaded +drivers it MUST default to 60 seconds and MUST be configurable. It MUST be called heartbeatFrequencyMS unless this +breaks backwards compatibility. + +For both multi- and single-threaded drivers, the driver MUST NOT permit users to configure it less than +minHeartbeatFrequencyMS (500ms). + +(See +[heartbeatFrequencyMS defaults to 10 seconds or 60 seconds](#heartbeatfrequencyms-defaults-to-10-seconds-or-60-seconds) +and [what's the point of periodic monitoring?](#whats-the-point-of-periodic-monitoring)) + +### Client construction + +Except for [initial DNS seed list discovery](../initial-dns-seedlist-discovery/initial-dns-seedlist-discovery.md) when +given a connection string with `mongodb+srv` scheme, the client's constructor MUST NOT do any I/O. This means that the +constructor does not throw an exception if servers are unavailable: the topology is not yet known when the constructor +returns. Similarly if a server has an incompatible wire protocol version, the constructor does not throw. Instead, all +subsequent operations on the client fail as long as the error persists. + +See [clients do no I/O in the constructor](#clients-do-no-io-in-the-constructor) for the justification. + +#### Multi-threaded and asynchronous client construction + +The constructor MAY start the monitors as background tasks and return immediately. Or the monitors MAY be started by +some method separate from the constructor; for example they MAY be started by some "initialize" method (by any name), or +on the first use of the client for an operation. + +#### Single-threaded client construction + +Single-threaded clients do no I/O in the constructor. They MUST [scan](#scan) the servers on demand, when the first +operation is attempted. 
+ +### Client closing + +When a client is closing, before it emits the `TopologyClosedEvent` as per the +[Events API](./server-discovery-and-monitoring-logging-and-monitoring.md#events-api), it SHOULD [remove](#remove) all +servers from its `TopologyDescription` and set its `TopologyType` to `Unknown`, emitting the corresponding +`TopologyDescriptionChangedEvent`. + +### Monitoring + +See the [Server Monitoring spec](server-monitoring.rst) for how a driver monitors each server. In summary, the client +monitors each server in the topology. The scope of server monitoring is to provide the topology with updated +ServerDescriptions based on hello or legacy hello command responses. + +### Parsing a hello or legacy hello response + +The client represents its view of each server with a [ServerDescription](#serverdescription). Each time the client +[checks](#check) a server, it MUST replace its description of that server with a new one if and only if the new +ServerDescription's [topologyVersion](#topologyversion) is greater than or equal to the current ServerDescription's +[topologyVersion](#topologyversion). + +(See [Replacing the TopologyDescription](#replacing-the-topologydescription) for an example implementation.) + +This replacement MUST happen even if the new server description compares equal to the previous one, in order to keep +client-tracked attributes like last update time and round trip time up to date. + +Drivers MUST be able to handle responses to both `hello` and legacy hello commands. When checking results, drivers MUST +first check for the `isWritablePrimary` field and fall back to checking for an `ismaster` field if `isWritablePrimary` +was not found. + +ServerDescriptions are created from hello or legacy hello outcomes as follows: + +#### type + +The new ServerDescription's type field is set to a [ServerType](#servertype). 
Note that these states do **not** exactly +correspond to [replica set member states](https://www.mongodb.com/docs/manual/reference/replica-states/). For example, +some replica set member states like STARTUP and RECOVERING are identical from the client's perspective, so they are +merged into "RSOther". Additionally, states like Standalone and Mongos are not replica set member states at all. + +| State | Symptoms | +| --------------- | ------------------------------------------------------------------------------------------------------------------------- | +| Unknown | Initial, or after a network error or failed hello or legacy hello call, or "ok: 1" not in hello or legacy hello response. | +| Standalone | No "msg: isdbgrid", no setName, and no "isreplicaset: true". | +| Mongos | "msg: isdbgrid". | +| PossiblePrimary | Not yet checked, but another member thinks it is the primary. | +| RSPrimary | "isWritablePrimary: true" or "ismaster: true", "setName" in response. | +| RSSecondary | "secondary: true", "setName" in response. | +| RSArbiter | "arbiterOnly: true", "setName" in response. | +| RSOther | "setName" in response, "hidden: true" or not primary, secondary, nor arbiter. | +| RSGhost | "isreplicaset: true" in response. | +| LoadBalanced | "loadBalanced=true" in URI. | + +A server can transition from any state to any other. For example, an administrator could shut down a secondary and bring +up a mongos in its place. + +##### RSGhost and RSOther + +The client MUST monitor replica set members even when they cannot be queried. These members are in state RSGhost or +RSOther. + +**RSGhost** members occur in at least three situations: + +- briefly during server startup, +- in an uninitialized replica set, +- or when the server is shunned (removed from the replica set config). + +An RSGhost server has no hosts list nor setName. 
Therefore the client MUST NOT attempt to use its hosts list nor check +its setName (see [JAVA-1161](https://jira.mongodb.org/browse/JAVA-1161) or +[CSHARP-671](https://jira.mongodb.org/browse/CSHARP-671).) However, the client MUST keep the RSGhost member in its +TopologyDescription, in case the client's only hope for staying connected to the replica set is that this member will +transition to a more useful state. + +For simplicity, this is the rule: any server is an RSGhost that reports "isreplicaset: true". + +Non-ghost replica set members have reported their setNames since MongoDB 1.6.2. See +[only support replica set members running MongoDB 1.6.2 or later](#only-support-replica-set-members-running-mongodb-162-or-later). + +> [!NOTE] +> The Java driver does not have a separate state for RSGhost; it is an RSOther server with no hosts list. + +**RSOther** servers may be hidden, starting up, or recovering. They cannot be queried, but their hosts lists are useful +for discovering the current replica set configuration. + +If a [hidden member](https://www.mongodb.com/docs/manual/core/replica-set-hidden-member/) is provided as a seed, the +client can use it to find the primary. Since the hidden member does not appear in the primary's host list, it will be +removed once the primary is checked. + +#### error + +If the client experiences any error when checking a server, it stores error information in the ServerDescription's error +field. + +#### roundTripTime + +Drivers MUST record the server's [round trip time](#round-trip-time) (RTT) after each successful call to hello or legacy +hello. The Server Selection Spec describes how RTT is averaged and how it is used in server selection. Drivers MUST also +record the server's minimum RTT per [Server Monitoring (Measuring RTT)](server-monitoring.rst#measuring-rtt). + +If a hello or legacy hello call fails, the RTT is not updated. 
Furthermore, while a server's type is Unknown its RTT is +null, and if it changes from a known type to Unknown its RTT is set to null. However, if it changes from one known type +to another (e.g. from RSPrimary to RSSecondary) its RTT is updated normally, not set to null nor restarted from scratch. + +#### lastWriteDate and opTime + +The hello or legacy hello response of a replica set member running MongoDB 3.4 and later contains a `lastWrite` +subdocument with fields `lastWriteDate` and `opTime` ([SERVER-8858](https://jira.mongodb.org/browse/SERVER-8858)). If +these fields are available, parse them from the hello or legacy hello response, otherwise set them to null. + +Clients MUST NOT attempt to compensate for the network latency between when the server generated its hello or legacy +hello response and when the client records `lastUpdateTime`. + +#### lastUpdateTime + +Clients SHOULD set lastUpdateTime with a monotonic clock. + +#### Hostnames are normalized to lower-case + +The same as with seeds provided in the initial configuration, all hostnames in the hello or legacy hello response's +"me", "hosts", "passives", and "arbiters" entries MUST be lower-cased. + +This prevents unnecessary work rediscovering a server if a seed "A" is provided and the server responds that "a" is in +the replica set. + +[RFC 4343](http://tools.ietf.org/html/rfc4343): + +> Domain Name System (DNS) names are "case insensitive". + +#### logicalSessionTimeoutMinutes + +MongoDB 3.6 and later include a `logicalSessionTimeoutMinutes` field if logical sessions are enabled in the deployment. +Clients MUST check for this field and set the ServerDescription's logicalSessionTimeoutMinutes field to this value, or +to null otherwise. + +#### topologyVersion + +MongoDB 4.4 and later include a `topologyVersion` field in all hello or legacy hello and +[State Change Error](#state-change-error) responses. 
Clients MUST check for this field and set the ServerDescription's
+topologyVersion field to this value, if present. The topologyVersion helps the client and server determine the relative
+freshness of topology information in concurrent messages. (See
+[What is the purpose of topologyVersion?](#what-is-the-purpose-of-topologyversion))
+
+The topologyVersion is a subdocument with two fields, "processId" and "counter":
+
+```typescript
+{
+    topologyVersion: {processId: <ObjectId>, counter: <int64>},
+    ( ... other fields ...)
+}
+```
+
+##### topologyVersion Comparison
+
+To compare a topologyVersion from a hello or legacy hello or State Change Error response to the current
+ServerDescription's topologyVersion:
+
+1. If the response topologyVersion is unset or the ServerDescription's topologyVersion is null, the client MUST assume
+   the response is more recent.
+2. If the response's topologyVersion.processId is not equal to the ServerDescription's, the client MUST assume the
+   response is more recent.
+3. If the response's topologyVersion.processId is equal to the ServerDescription's, the client MUST use the counter
+   field to determine which topologyVersion is more recent.
+
+See [Replacing the TopologyDescription](#replacing-the-topologydescription) for an example implementation of
+topologyVersion comparison.
+
+#### serviceId
+
+MongoDB 5.0 and later, as well as any mongos-like service, include a `serviceId` field when the service is configured
+behind a load balancer.
+
+#### Other ServerDescription fields
+
+Other required fields defined in the [ServerDescription](#serverdescription) data structure are parsed from the hello or
+legacy hello response in the obvious way.
+
+#### Server Description Equality
+
+For the purpose of determining whether to publish SDAM events, two server descriptions having the same address MUST be
+considered equal if and only if the values of [ServerDescription](#serverdescription) fields marked (=) are respectively
+equal.
+ +This specification does not prescribe how to compare server descriptions with different addresses for equality. + +### Updating the TopologyDescription + +Each time the client checks a server, it processes the outcome (successful or not) to create a +[ServerDescription](#serverdescription), and then it processes the ServerDescription to update its +[TopologyDescription](#topologydescription). + +The TopologyDescription's [TopologyType](#topologytype) influences how the ServerDescription is processed. The following +subsection specifies how the client updates its TopologyDescription when the TopologyType is Single. The next subsection +treats the other types. + +#### TopologyType Single + +The TopologyDescription's type was initialized as Single and remains Single forever. There is always one +ServerDescription in TopologyDescription.servers. + +Whenever the client checks a server (successfully or not), and regardless of whether the new server description is equal +to the previous server description as defined in [Server Description Equality](#server-description-equality), the +ServerDescription in TopologyDescription.servers MUST be replaced with the new ServerDescription. + +##### Checking wire protocol compatibility + +A ServerDescription which is not Unknown is incompatible if: + +- minWireVersion > clientMaxWireVersion, or +- maxWireVersion \< clientMinWireVersion + +If any ServerDescription is incompatible, the client MUST set the TopologyDescription's "compatible" field to false and +fill out the TopologyDescription's "compatibilityError" field like so: + +- if ServerDescription.minWireVersion > clientMaxWireVersion: + + "Server at $host:$port requires wire version $minWireVersion, but this version of $driverName only supports up to + $clientMaxWireVersion." 
+ +- if ServerDescription.maxWireVersion \< clientMinWireVersion: + + "Server at $host:$port reports wire version $maxWireVersion, but this version of $driverName requires at least + $clientMinWireVersion (MongoDB $mongoVersion)." + +Replace $mongoVersion with the appropriate MongoDB minor version, for example if clientMinWireVersion is 2 and it +connects to MongoDB 2.4, format the error like: + +> "Server at example.com:27017 reports wire version 0, but this version of My Driver requires at least 2 (MongoDB 2.6)." + +In this second case, the exact required MongoDB version is known and can be named in the error message, whereas in the +first case the implementer does not know which MongoDB versions will be compatible or incompatible in the future. + +##### Verifying setName with TopologyType Single + +A client MAY allow the user to supply a setName with an initial TopologyType of Single. In this case, if the +ServerDescription's setName is null or wrong, the ServerDescription MUST be replaced with a default ServerDescription of +type Unknown. + +#### TopologyType LoadBalanced + +See the [Load Balancer Specification](../load-balancers/load-balancers.md#server-discovery-logging-and-monitoring) for +details. + +#### Other TopologyTypes + +If the TopologyType is **not** Single, the topology can contain zero or more servers. The state of topology containing +zero servers is terminal (because servers can only be added if they are reported by a server already in the topology). A +client SHOULD emit a warning if it is constructed with no seeds in the initial seed list. A client SHOULD emit a warning +when, in the process of updating its topology description, it removes the last server from the topology. + +Whenever a client completes a hello or legacy hello call, it creates a new ServerDescription with the proper +[ServerType](#servertype). It replaces the server's previous description in TopologyDescription.servers with the new +one. 
+ +Apply the logic for [checking wire protocol compatibility](#checking-wire-protocol-compatibility) to each +ServerDescription in the topology. If any server's wire protocol version range does not overlap with the client's, the +client updates the "compatible" and "compatibilityError" fields as described above for TopologyType Single. Otherwise +"compatible" is set to true. + +It is possible for a multi-threaded client to receive a hello or legacy hello outcome from a server after the server has +been removed from the TopologyDescription. For example, a monitor begins checking a server "A", then a different monitor +receives a response from the primary claiming that "A" has been removed from the replica set, so the client removes "A" +from the TopologyDescription. Then, the check of server "A" completes. + +In all cases, the client MUST ignore hello or legacy hello outcomes from servers that are not in the +TopologyDescription. + +The following subsections explain in detail what actions the client takes after replacing the ServerDescription. + +##### TopologyType table + +The new ServerDescription's type is the vertical axis, and the current TopologyType is the horizontal. Where a +ServerType and a TopologyType intersect, the table shows what action the client takes. + +"no-op" means, do nothing **after** replacing the server's old description with the new one. 
+ +| | TopologyType Unknown | TopologyType Sharded | TopologyType ReplicaSetNoPrimary | TopologyType ReplicaSetWithPrimary | +| ---------------------- | ----------------------------------------------------------------------------------------------- | -------------------- | ------------------------------------------------------------------------------------------- | --------------------------------------------------------------- | +| ServerType Unknown | no-op | no-op | no-op | [checkIfHasPrimary](#checkifhasprimary) | +| ServerType Standalone | [updateUnknownWithStandalone](#updateunknownwithstandalone) | [remove](#remove) | [remove](#remove) | [remove](#remove) and [checkIfHasPrimary](#checkifhasprimary) | +| ServerType Mongos | Set topology type to Sharded | no-op | [remove](#remove) | [remove](#remove) and [checkIfHasPrimary](#checkifhasprimary) | +| ServerType RSPrimary | Set topology type to ReplicaSetWithPrimary then [updateRSFromPrimary](#updatersfromprimary) | [remove](#remove) | Set topology type to ReplicaSetWithPrimary then [updateRSFromPrimary](#updatersfromprimary) | [updateRSFromPrimary](#updatersfromprimary) | +| ServerType RSSecondary | Set topology type to ReplicaSetNoPrimary then [updateRSWithoutPrimary](#updaterswithoutprimary) | [remove](#remove) | [updateRSWithoutPrimary](#updaterswithoutprimary) | [updateRSWithPrimaryFromMember](#updaterswithprimaryfrommember) | +| ServerType RSArbiter | Set topology type to ReplicaSetNoPrimary then [updateRSWithoutPrimary](#updaterswithoutprimary) | [remove](#remove) | [updateRSWithoutPrimary](#updaterswithoutprimary) | [updateRSWithPrimaryFromMember](#updaterswithprimaryfrommember) | +| ServerType RSOther | Set topology type to ReplicaSetNoPrimary then [updateRSWithoutPrimary](#updaterswithoutprimary) | [remove](#remove) | [updateRSWithoutPrimary](#updaterswithoutprimary) | [updateRSWithPrimaryFromMember](#updaterswithprimaryfrommember) | +| ServerType RSGhost | no-op[^2] | [remove](#remove) | no-op | 
[checkIfHasPrimary](#checkifhasprimary) | + +##### TopologyType explanations + +This subsection complements the [TopologyType table](#topologytype-table) with prose explanations of the TopologyTypes +(besides Single and LoadBalanced). + +TopologyType Unknown\ +A starting state. + +**Actions**: + +- If the incoming ServerType is Unknown (that is, the hello or legacy hello call failed), keep the server in + TopologyDescription.servers. The TopologyType remains Unknown. +- The + [TopologyType remains Unknown when an RSGhost is discovered](#topologytype-remains-unknown-when-an-rsghost-is-discovered), + too. +- If the type is Standalone, run [updateUnknownWithStandalone](#updateunknownwithstandalone). +- If the type is Mongos, set the TopologyType to Sharded. +- If the type is RSPrimary, record its setName and call [updateRSFromPrimary](#updatersfromprimary). +- If the type is RSSecondary, RSArbiter or RSOther, record its setName, set the TopologyType to ReplicaSetNoPrimary, and + call [updateRSWithoutPrimary](#updaterswithoutprimary). + +TopologyType Sharded\ +A steady state. Connected to one or more mongoses. + +**Actions**: + +- If the server is Unknown or Mongos, keep it. +- Remove others. + +TopologyType ReplicaSetNoPrimary\ +A starting state. The topology is definitely a replica set, but no primary is known. + +**Actions**: + +- Keep Unknown servers. +- Keep RSGhost servers: they are members of some replica set, perhaps this one, and may recover. (See + [RSGhost and RSOther](#rsghost-and-rsother).) +- Remove any Standalones or Mongoses. +- If the type is RSPrimary call [updateRSFromPrimary](#updatersfromprimary). +- If the type is RSSecondary, RSArbiter or RSOther, run [updateRSWithoutPrimary](#updaterswithoutprimary). + +TopologyType ReplicaSetWithPrimary\ +A steady state. The primary is known. + +**Actions**: + +- If the server type is Unknown, keep it, and run [checkIfHasPrimary](#checkifhasprimary). 
+- Keep RSGhost servers: they are members of some replica set, perhaps this one, and may recover. (See + [RSGhost and RSOther](#rsghost-and-rsother).) Run [checkIfHasPrimary](#checkifhasprimary). +- Remove any Standalones or Mongoses and run [checkIfHasPrimary](#checkifhasprimary). +- If the type is RSPrimary run [updateRSFromPrimary](#updatersfromprimary). +- If the type is RSSecondary, RSArbiter or RSOther, run [updateRSWithPrimaryFromMember](#updaterswithprimaryfrommember). + +#### Actions + +##### updateUnknownWithStandalone + +This subroutine is executed with the ServerDescription from Standalone when the TopologyType is Unknown: + +```python +if description.address not in topologyDescription.servers: + return + +if settings.seeds has one seed: + topologyDescription.type = Single +else: + remove this server from topologyDescription and stop monitoring it +``` + +See +[TopologyType remains Unknown when one of the seeds is a Standalone](#topologytype-remains-unknown-when-one-of-the-seeds-is-a-standalone). 
+ +##### updateRSWithoutPrimary + +This subroutine is executed with the ServerDescription from an RSSecondary, RSArbiter, or RSOther when the TopologyType +is ReplicaSetNoPrimary: + +```python +if description.address not in topologyDescription.servers: + return + +if topologyDescription.setName is null: + topologyDescription.setName = description.setName + +else if topologyDescription.setName != description.setName: + remove this server from topologyDescription and stop monitoring it + return + +for each address in description's "hosts", "passives", and "arbiters": + if address is not in topologyDescription.servers: + add new default ServerDescription of type "Unknown" + begin monitoring the new server + +if description.primary is not null: + find the ServerDescription in topologyDescription.servers whose + address equals description.primary + + if its type is Unknown, change its type to PossiblePrimary + +if description.address != description.me: + remove this server from topologyDescription and stop monitoring it + return +``` + +Unlike [updateRSFromPrimary](#updatersfromprimary), this subroutine does **not** remove any servers from the +TopologyDescription based on the list of servers in the "hosts" field of the hello or legacy hello response. The only +server that might be removed is the server itself that the hello or legacy hello response is from. + +The special handling of description.primary ensures that a single-threaded client [scans](#scan) the possible primary +before other members. + +See [replica set monitoring with and without a primary](#replica-set-monitoring-with-and-without-a-primary). 
+ +##### updateRSWithPrimaryFromMember + +This subroutine is executed with the ServerDescription from an RSSecondary, RSArbiter, or RSOther when the TopologyType +is ReplicaSetWithPrimary: + +```python +if description.address not in topologyDescription.servers: + # While we were checking this server, another thread heard from the + # primary that this server is not in the replica set. + return + +# SetName is never null here. +if topologyDescription.setName != description.setName: + remove this server from topologyDescription and stop monitoring it + checkIfHasPrimary() + return + +if description.address != description.me: + remove this server from topologyDescription and stop monitoring it + checkIfHasPrimary() + return + +# Had this member been the primary? +if there is no primary in topologyDescription.servers: + topologyDescription.type = ReplicaSetNoPrimary + + if description.primary is not null: + find the ServerDescription in topologyDescription.servers whose + address equals description.primary + + if its type is Unknown, change its type to PossiblePrimary +``` + +The special handling of description.primary ensures that a single-threaded client [scans](#scan) the possible primary +before other members. + +##### updateRSFromPrimary + +This subroutine is executed with a ServerDescription of type RSPrimary: + +```python +if serverDescription.address not in topologyDescription.servers: + return + +if topologyDescription.setName is null: + topologyDescription.setName = serverDescription.setName + +else if topologyDescription.setName != serverDescription.setName: + # We found a primary but it doesn't have the setName + # provided by the user or previously discovered. + remove this server from topologyDescription and stop monitoring it + checkIfHasPrimary() + return + +# Election ids are ObjectIds, see +# see "Using electionId and setVersion to detect stale primaries" +# for comparison rules. 
+ +if serverDescription.maxWireVersion >= 17: # MongoDB 6.0+ + # Null values for both electionId and setVersion are always considered less than + if serverDescription.electionId > topologyDescription.maxElectionId or ( + serverDescription.electionId == topologyDescription.maxElectionId + and serverDescription.setVersion >= topologyDescription.maxSetVersion + ): + topologyDescription.maxElectionId = serverDescription.electionId + topologyDescription.maxSetVersion = serverDescription.setVersion + else: + # Stale primary. + # replace serverDescription with a default ServerDescription of type "Unknown" + checkIfHasPrimary() + return +else: + # Maintain old comparison rules, namely setVersion is checked before electionId + if serverDescription.setVersion is not null and serverDescription.electionId is not null: + if ( + topologyDescription.maxSetVersion is not null + and topologyDescription.maxElectionId is not null + and ( + topologyDescription.maxSetVersion > serverDescription.setVersion + or ( + topologyDescription.maxSetVersion == serverDescription.setVersion + and topologyDescription.maxElectionId > serverDescription.electionId + ) + ) + ): + # Stale primary. + # replace serverDescription with a default ServerDescription of type "Unknown" + checkIfHasPrimary() + return + + topologyDescription.maxElectionId = serverDescription.electionId + + if serverDescription.setVersion is not null and ( + topologyDescription.maxSetVersion is null + or serverDescription.setVersion > topologyDescription.maxSetVersion + ): + topologyDescription.maxSetVersion = serverDescription.setVersion + + +for each server in topologyDescription.servers: + if server.address != serverDescription.address: + if server.type is RSPrimary: + # See note below about invalidating an old primary. 
+ replace the server with a default ServerDescription of type "Unknown" + +for each address in serverDescription's "hosts", "passives", and "arbiters": + if address is not in topologyDescription.servers: + add new default ServerDescription of type "Unknown" + begin monitoring the new server + +for each server in topologyDescription.servers: + if server.address not in serverDescription's "hosts", "passives", or "arbiters": + remove the server and stop monitoring it + +checkIfHasPrimary() +``` + +A note on invalidating the old primary: when a new primary is discovered, the client finds the previous primary (there +should be none or one) and replaces its description with a default ServerDescription of type "Unknown." A multi-threaded +client MUST [request an immediate check](server-monitoring.rst#requesting-an-immediate-check) for that server as soon as +possible. + +If the old primary server version is 4.0 or earlier, the client MUST clear its connection pool for the old primary, too: +the connections are all bad because the old primary has closed its sockets. If the old primary server version is 4.2 or +newer, the client MUST NOT clear its connection pool for the old primary. + +See [replica set monitoring with and without a primary](#replica-set-monitoring-with-and-without-a-primary). + +If the server is primary with an obsolete electionId or setVersion, it is likely a stale primary that is going to step +down. Mark it Unknown and let periodic monitoring detect when it becomes secondary. See +[using electionId and setVersion to detect stale primaries](#using-electionid-and-setversion-to-detect-stale-primaries). + +A note on checking "me": Unlike `updateRSWithPrimaryFromMember`, there is no need to remove the server if the address is +not equal to "me": since the server address will not be a member of either "hosts", "passives", or "arbiters", the +server will already have been removed. 
+ +##### checkIfHasPrimary + +Set TopologyType to ReplicaSetWithPrimary if there is an RSPrimary in TopologyDescription.servers, otherwise set it to +ReplicaSetNoPrimary. + +For example, if the TopologyType is ReplicaSetWithPrimary and the client is processing a new ServerDescription of type +Unknown, that could mean the primary just disconnected, so checkIfHasPrimary must run to check if the TopologyType +should become ReplicaSetNoPrimary. + +Another example is if the client first reaches the primary via its external IP, but the response's host list includes +only internal IPs. In that case the client adds the primary's internal IP to the TopologyDescription and begins +monitoring it, and removes the external IP. Right after removing the external IP from the description, the TopologyType +MUST be ReplicaSetNoPrimary, since no primary is available at this moment. + +##### remove + +Remove the server from TopologyDescription.servers and stop monitoring it. + +In multi-threaded clients, a monitor may be currently checking this server and may not immediately abort. Once the check +completes, this server's hello or legacy hello outcome MUST be ignored, and the monitor SHOULD halt. + +#### Logical Session Timeout + +Whenever a client updates the TopologyDescription from a hello or legacy hello response, it MUST set +TopologyDescription.logicalSessionTimeoutMinutes to the smallest logicalSessionTimeoutMinutes value among +ServerDescriptions of all data-bearing server types. If any have a null logicalSessionTimeoutMinutes, then +TopologyDescription.logicalSessionTimeoutMinutes MUST be set to null. + +See the Driver Sessions Spec for the purpose of this value. 
+
+### Connection Pool Management
+
+For drivers that support connection pools, after a server check is completed successfully, if the server is determined
+to be [data-bearing](server-discovery-and-monitoring.md#data-bearing-server-type) or a
+[direct connection](server-discovery-and-monitoring.md#general-requirements) to the server is requested, and does not
+already have a connection pool, the driver MUST create the connection pool for the server. Additionally, if a driver
+implements a CMAP compliant connection pool, the server's pool (even if it already existed) MUST be marked as "ready".
+See the [Server Monitoring spec](server-monitoring.rst) for more information.
+
+Clearing the connection pool for a server MUST be synchronized with the update to the corresponding ServerDescription
+(e.g. by holding the lock on the TopologyDescription when clearing the pool). This prevents a possible race between the
+monitors and application threads. See
+[Why synchronize clearing a server's pool with updating the topology?](#why-synchronize-clearing-a-servers-pool-with-updating-the-topology)
+for more information.
+
+### Error handling
+
+#### Network error during server check
+
+See error handling in the [Server Monitoring spec](server-monitoring.rst).
+
+#### Application errors
+
+When processing a network or command error, clients MUST first check the error's [generation number](#generation-number).
+If the error's generation number is equal to the pool's generation number then error handling MUST continue
+according to [Network error when reading or writing](#network-error-when-reading-or-writing) or
+["not writable primary" and "node is recovering"](#not-writable-primary-and-node-is-recovering). Otherwise, the error is
+considered stale and the client MUST NOT update any topology state.
(See +[Why ignore errors based on CMAP's generation number?](#why-ignore-errors-based-on-cmaps-generation-number)) + +##### Error handling pseudocode + +Application operations can fail in various places, for example: + +- A network error, network timeout, or command error may occur while establishing a new connection. Establishing a + connection includes the MongoDB handshake and completing authentication (if configured). +- A network error or network timeout may occur while reading or writing to an established connection. +- A command error may be returned from the server. +- A "writeConcernError" field may be included in the command response. + +Depending on the context, these errors may update SDAM state by marking the server Unknown and may clear the server's +connection pool. Some errors also require other side effects, like cancelling a check or requesting an immediate check. +Drivers may use the following pseudocode to guide their implementation: + +```python +def handleError(error): + address = error.address + topologyVersion = error.topologyVersion + + with client.lock: + # Ignore stale errors based on generation and topologyVersion. + if isStaleError(client.topologyDescription, error) + return + + if isStateChangeError(error): + # Don't mark server unknown in load balanced mode. + if type != LoadBalanced + # Mark the server Unknown + unknown = new ServerDescription(type=Unknown, error=error, topologyVersion=topologyVersion) + onServerDescriptionChanged(unknown, connection pool for server) + if isShutdown(code) or (error was from <4.2): + # the pools must only be cleared while the lock is held. + if type == LoadBalanced: + clear connection pool for serviceId + else: + clear connection pool for server + if multi-threaded: + request immediate check + else: + # Check right now if this is "not writable primary", since it might be a + # useful secondary. If it's "node is recovering" leave it for the + # next full scan. 
+ if isNotWritablePrimary(error): + check failing server + elif isNetworkError(error) or (not error.completedHandshake and (isNetworkTimeout(error) or isAuthError(error))): + if type != LoadBalanced + # Mark the server Unknown + unknown = new ServerDescription(type=Unknown, error=error) + onServerDescriptionChanged(unknown, connection pool for server) + clear connection pool for server + else + if serviceId + clear connection pool for serviceId + # Cancel inprogress check + cancel monitor check + +def isStaleError(topologyDescription, error): + currentServer = topologyDescription.servers[server.address] + currentGeneration = currentServer.pool.generation + generation = get connection generation from error + if generation < currentGeneration: + # Stale generation number. + return True + + currentTopologyVersion = currentServer.topologyVersion + # True if the current error's topologyVersion is greater than the server's + # We use >= instead of > because any state change should result in a new topologyVersion + return compareTopologyVersion(currentTopologyVersion, error.commandResponse.get("topologyVersion")) >= 0 +``` + +The following pseudocode checks a response for a "not master" or "node is recovering" error: + +```python +recoveringCodes = [11600, 11602, 13436, 189, 91] +notWritablePrimaryCodes = [10107, 13435, 10058] +shutdownCodes = [11600, 91] + +def isRecovering(message, code): + if code: + if code in recoveringCodes: + return true + else: + # if no code, use the error message. + return ("not master or secondary" in message + or "node is recovering" in message) + +def isNotWritablePrimary(message, code): + if code: + if code in notWritablePrimaryCodes: + return true + else: + # if no code, use the error message. 
+ if isRecovering(message, None): + return false + return ("not master" in message) + +def isShutdown(code): + if code and code in shutdownCodes: + return true + return false + +def isStateChangeError(error): + message = error.errmsg + code = error.code + return isRecovering(message, code) or isNotWritablePrimary(message, code) + +def parseGle(response): + if "err" in response: + handleError(CommandError(response, response["err"], response["code"])) + +# Parse response to any command besides getLastError. +def parseCommandResponse(response): + if not response["ok"]: + handleError(CommandError(response, response["errmsg"], response["code"])) + else if response["writeConcernError"]: + wce = response["writeConcernError"] + handleError(WriteConcernError(response, wce["errmsg"], wce["code"])) + +def parseQueryResponse(response): + if the "QueryFailure" bit is set in response flags: + handleError(CommandError(response, response["$err"], response["code"])) +``` + +The following sections describe the handling of different classes of application errors in detail including network +errors, network timeout errors, state change errors, and authentication errors. 
+ +##### Network error when reading or writing + +To describe how the client responds to network errors during application operations, we distinguish two phases of +connecting to a server and using it for application operations: + +- *Before the handshake completes*: the client establishes a new connection to the server and completes an initial + handshake by calling "hello" or legacy hello and reading the response, and optionally completing authentication +- *After the handshake completes*: the client uses the established connection for application operations + +If there is a network error or timeout on the connection before the handshake completes, the client MUST replace the +server's description with a default ServerDescription of type Unknown when the TopologyType is not LoadBalanced, and +fill the ServerDescription's error field with useful information. + +If there is a network error or timeout on the connection before the handshake completes, and the TopologyType is +LoadBalanced, the client MUST keep the ServerDescription as LoadBalancer. + +If there is a network timeout on the connection after the handshake completes, the client MUST NOT mark the server +Unknown. (A timeout may indicate a slow operation on the server, rather than an unavailable server.) If, however, there +is some other network error on the connection after the handshake completes, the client MUST replace the server's +description with a default ServerDescription of type Unknown if the TopologyType is not LoadBalanced, and fill the +ServerDescription's error field with useful information, the same as if an error or timeout occurred before the +handshake completed. 
+ +When the client marks a server Unknown due to a network error or timeout, the Unknown ServerDescription MUST be sent +through the same process for [updating the TopologyDescription](#updating-the-topologydescription) as if it had been a +failed hello or legacy hello outcome from a server check: for example, if the TopologyType is ReplicaSetWithPrimary and +a write to the RSPrimary server fails because of a network error (other than timeout), then a new ServerDescription is +created for the primary, with type Unknown, and the client executes the proper subroutine for an Unknown server when the +TopologyType is ReplicaSetWithPrimary: referring to the table above we see the subroutine is +[checkIfHasPrimary](#checkifhasprimary). The result is the TopologyType changes to ReplicaSetNoPrimary. See the test +scenario called "Network error writing to primary". + +The client MUST close all idle sockets in its connection pool for the server: if one socket is bad, it is likely that +all are. + +Clients MUST NOT request an immediate check of the server; since application sockets are used frequently, a network +error likely means the server has just become unavailable, so an immediate refresh is likely to get a network error, +too. + +The server will not remain Unknown forever. It will be refreshed by the next periodic check or, if an application +operation needs the server sooner than that, then a re-check will be triggered by the server selection algorithm. + +##### "not writable primary" and "node is recovering" + +These errors are detected from a getLastError response, write command response, or query response. Clients MUST check if +the server error is a "node is recovering" error or a "not writable primary" error. + +If the response includes an error code, it MUST be solely used to determine if error is a "node is recovering" or "not +writable primary" error. 
Clients MUST match the errors by the numeric error code and not by the code name, as the code +name can change from one server version to the next. + +The following error codes indicate a replica set member is temporarily unusable. These are called "node is recovering" +errors: + +| Error Name | Error Code | +| ------------------------------- | ---------- | +| InterruptedAtShutdown | 11600 | +| InterruptedDueToReplStateChange | 11602 | +| NotPrimaryOrSecondary | 13436 | +| PrimarySteppedDown | 189 | +| ShutdownInProgress | 91 | + +And the following error codes indicate a "not writable primary" error: + +| Error Name | Error Code | +| ----------------------- | ---------- | +| NotWritablePrimary | 10107 | +| NotPrimaryNoSecondaryOk | 13435 | +| LegacyNotPrimary | 10058 | + +Clients MUST fallback to checking the error message if and only if the response does not include an error code. The +error is considered a "node is recovering" error if the substrings "node is recovering" or "not master or secondary" are +anywhere in the error message. Otherwise, if the substring "not master" is in the error message it is a "not writable +primary" error. + +Additionally, if the response includes a write concern error, then the code and message of the write concern error MUST +be checked the same way a response error is checked above. + +Errors contained within the writeErrors field MUST NOT be checked. + +See the test scenario called "parsing 'not writable primary' and 'node is recovering' errors" for example response +documents. + +When the client sees a "not writable primary" or "node is recovering" error and the error's +[topologyVersion](#topologyversion) is strictly greater than the current ServerDescription's topologyVersion it MUST +replace the server's description with a ServerDescription of type Unknown. Clients MUST store useful information in the +new ServerDescription's error field, including the error message from the server. 
Clients MUST store the error's +[topologyVersion](#topologyversion) field in the new ServerDescription if present. (See +[What is the purpose of topologyVersion?](#what-is-the-purpose-of-topologyversion)) + +Multi-threaded and asynchronous clients MUST +[request an immediate check](server-monitoring.rst#requesting-an-immediate-check) of the server. Unlike in the "network +error" scenario above, a "not writable primary" or "node is recovering" error means the server is available but the +client is wrong about its type, thus an immediate re-check is likely to provide useful information. + +For single-threaded clients, in the case of a "not writable primary" or "node is shutting down" error, the client MUST +mark the topology as "stale" so the next server selection scans all servers. For a "node is recovering" error, +single-threaded clients MUST NOT mark the topology as "stale". If a node is recovering for some time, an immediate scan +may not gain useful information. + +The following subset of "node is recovering" errors is defined to be "node is shutting down" errors: + +| Error Name | Error Code | +| --------------------- | ---------- | +| InterruptedAtShutdown | 11600 | +| ShutdownInProgress | 91 | + +When handling a "not writable primary" or "node is recovering" error, the client MUST clear the server's connection pool +if and only if the error is "node is shutting down" or the error originated from server version \< 4.2. + +(See +[when does a client see "not writable primary" or "node is recovering"?](#when-does-a-client-see-not-writable-primary-or-node-is-recovering), +[use error messages to detect "not master" and "node is recovering"](#use-error-messages-to-detect-not-master-and-node-is-recovering), +and [other transient errors](#other-transient-errors) and +[Why close connections when a node is shutting down?](#why-close-connections-when-a-node-is-shutting-down).) 
+ +##### Authentication errors + +If the authentication handshake fails for a connection, drivers MUST mark the server Unknown and clear the server's +connection pool if the TopologyType is not LoadBalanced. (See +[Why mark a server Unknown after an auth error?](#why-mark-a-server-unknown-after-an-auth-error)) + +### Monitoring SDAM events + +The required driver specification for providing lifecycle hooks into server discovery and monitoring for applications to +consume can be found in the [SDAM Monitoring Specification](server-discovery-and-monitoring-logging-and-monitoring.rst). + +### Implementation notes + +This section intends to provide generous guidance to driver authors. It is complementary to the reference +implementations. Words like "should", "may", and so on are used more casually here. + +See also, the implementation notes in the [Server Monitoring spec](server-monitoring.rst). + +#### Multi-threaded or asynchronous server selection + +While no suitable server is available for an operation, +[the client MUST re-check all servers every minHeartbeatFrequencyMS](#the-client-must-re-check-all-servers-every-minheartbeatfrequencyms). +(See [requesting an immediate check](server-monitoring.rst#requesting-an-immediate-check).) + +#### Single-threaded server selection + +When a client that uses [single-threaded monitoring](server-monitoring.rst#single-threaded-monitoring) fails to select a +suitable server for any operation, it [scans](#scan) the servers, then attempts selection again, to see if the scan +discovered suitable servers. It repeats, waiting [minHeartbeatFrequencyMS](#minheartbeatfrequencyms) after each scan, +until a timeout. + +#### Documentation + +##### Giant seed lists + +Drivers' manuals should warn against huge seed lists, since it will slow initialization for single-threaded clients and +generate load for multi-threaded and asynchronous drivers. 
+ +#### Multi-threaded + +#### Warning about the maxWireVersion from a monitor's hello or legacy hello response + +Clients consult some fields from a server's hello or legacy hello response to decide how to communicate with it: + +- maxWireVersion +- maxBsonObjectSize +- maxMessageSizeBytes +- maxWriteBatchSize + +It is tempting to take these values from the last hello or legacy hello response a *monitor* received and store them in +the ServerDescription, but this is an anti-pattern. Multi-threaded and asynchronous clients that do so are prone to +several classes of race, for example: + +- Setup: A MongoDB 3.0 Standalone with authentication enabled, the client must log in with SCRAM-SHA-1. +- The monitor thread discovers the server and stores maxWireVersion on the ServerDescription +- An application thread wants a socket, selects the Standalone, and is about to check the maxWireVersion on its + ServerDescription when... +- The monitor thread gets disconnected from server and marks it Unknown, with default maxWireVersion of 0. +- The application thread resumes, creates a socket, and attempts to log in using MONGODB-CR, since maxWireVersion is + *now* reported as 0. +- Authentication fails, the server requires SCRAM-SHA-1. + +Better to call hello or legacy hello for each new socket, as required by the [Auth Spec](../auth/auth.md), and use the +hello or legacy hello response associated with that socket for maxWireVersion, maxBsonObjectSize, etc.: all the fields +required to correctly communicate with the server. + +The hello or legacy hello responses received by monitors determine if the topology as a whole \[is compatible\](#is +compatible) with the driver, and which servers are suitable for selection. The monitors' responses should not be used to +determine how to format wire protocol messages to the servers. 
+ +##### Immutable data + +Multi-threaded drivers should treat ServerDescriptions and TopologyDescriptions as immutable: the client replaces them, +rather than modifying them, in response to new information about the topology. Thus readers of these data structures can +simply acquire a reference to the current one and read it, without holding a lock that would block a monitor from making +further updates. + +##### Process one hello or legacy hello outcome at a time + +Although servers are checked in parallel, the function that actually creates the new TopologyDescription should be +synchronized so only one thread can run it at a time. + +##### Replacing the TopologyDescription + +Drivers may use the following pseudocode to guide their implementation. The client object has a lock and a condition +variable. It uses the lock to ensure that only one new ServerDescription is processed at a time, and it must be acquired +before invoking this function. Once the client has taken the lock it must do no I/O: + +```python +def onServerDescriptionChanged(server, pool): + # "server" is the new ServerDescription. + # "pool" is the pool associated with the server + + if server.address not in client.topologyDescription.servers: + # The server was once in the topologyDescription, otherwise + # we wouldn't have been monitoring it, but an intervening + # state-change removed it. E.g., we got a host list from + # the primary that didn't include this server. + return + + newTopologyDescription = client.topologyDescription.copy() + + # Ignore this update if the current topologyVersion is greater than + # the new ServerDescription's. + if isStaleServerDescription(td, server): + return + + # Replace server's previous description. 
+    address = server.address
+    newTopologyDescription.servers[address] = server
+
+    # for drivers that implement CMAP, mark the connection pool as ready after a successful check
+    if (server.type in (Mongos, RSPrimary, RSSecondary, Standalone, LoadBalanced))
+        or (server.type != Unknown and newTopologyDescription.type == Single):
+        pool.ready()
+
+    take any additional actions,
+    depending on the TopologyType and server...
+
+    # Replace TopologyDescription and notify waiters.
+    client.topologyDescription = newTopologyDescription
+    client.condition.notifyAll()
+
+def compareTopologyVersion(tv1, tv2):
+    """Return -1 if tv1<tv2, 0 if tv1==tv2, 1 if tv1>tv2"""
+    if tv1 is None or tv2 is None:
+        # Assume greater.
+        return -1
+    pid1 = tv1['processId']
+    pid2 = tv2['processId']
+    if pid1 == pid2:
+        counter1 = tv1['counter']
+        counter2 = tv2['counter']
+        if counter1 == counter2:
+            return 0
+        elif counter1 < counter2:
+            return -1
+        else:
+            return 1
+    else:
+        # Assume greater.
+        return -1
+
+def isStaleServerDescription(topologyDescription, server):
+    # True if the new ServerDescription's topologyVersion is strictly less
+    # than the current server's, i.e. the new description is stale.
+    currentServer = topologyDescription.servers[server.address]
+    currentTopologyVersion = currentServer.topologyVersion
+    return compareTopologyVersion(currentTopologyVersion, server.topologyVersion) > 0
+```
+
+Notifying the condition unblocks threads waiting in the server-selection loop for a suitable server to be discovered.
+
+> [!NOTE]
+> The Java driver uses a CountDownLatch instead of a condition variable, and it atomically swaps the old and new
+> CountDownLatches so it does not need "client.lock". It does, however, use a lock to ensure that only one thread runs
+> onServerDescriptionChanged at a time.
+
+## Rationale
+
+### Clients do no I/O in the constructor
+
+An alternative proposal was to distinguish between "discovery" and "monitoring". 
When discovery begins, the client +checks all its seeds, and discovery is complete once all servers have been checked, or after some maximum time. +Application operations cannot proceed until discovery is complete. + +If the discovery phase is distinct, then single- and multi-threaded drivers could accomplish discovery in the +constructor, and throw an exception from the constructor if the deployment is unavailable or misconfigured. This is +consistent with prior behavior for many drivers. It will surprise some users that the constructor now succeeds, but all +operations fail. + +Similarly for misconfigured seed lists: the client may discover a mix of mongoses and standalones, or find multiple +replica set names. It may surprise some users that the constructor succeeds and the client attempts to proceed with a +compatible subset of the deployment. + +Nevertheless, this spec prohibits I/O in the constructor for the following reasons: + +#### Common case + +In the common case, the deployment is available and usable. This spec favors allowing operations to proceed as soon as +possible in the common case, at the cost of surprising behavior in uncommon cases. + +#### Simplicity + +It is simpler to omit a special discovery phase and treat all server [checks](#check) the same. + +#### Consistency + +Asynchronous clients cannot do I/O in a constructor, so it is consistent to prohibit I/O in other clients' constructors +as well. + +#### Restarts + +If clients can be constructed when the deployment is in some states but not in other states, it leads to an unfortunate +scenario: When the deployment is passing through a strange state, long-running clients may keep working, but any clients +restarted during this period fail. + +Say an administrator changes one replica set member's setName. Clients that are already constructed remove the bad +member and stay usable, but if any client is restarted its constructor fails. 
Web servers that dynamically adjust their +process pools will show particularly undesirable behavior. + +### heartbeatFrequencyMS defaults to 10 seconds or 60 seconds + +Many drivers have different values. The time has come to standardize. Lacking a rigorous methodology for calculating the +best frequency, this spec chooses 10 seconds for multi-threaded or asynchronous drivers because some already use that +value. + +Because scanning has a greater impact on the performance of single-threaded drivers, they MUST default to a longer +frequency (60 seconds). + +An alternative is to check servers less and less frequently the longer they remain unchanged. This idea is rejected +because it is a goal of this spec to answer questions about monitoring such as, + +- "How rapidly can I rotate a replica set to a new set of hosts?" +- "How soon after I add a secondary will query load be rebalanced?" +- "How soon will a client notice a change in round trip time, or tags?" + +Having a constant monitoring frequency allows us to answer these questions simply and definitively. Losing the ability +to answer these questions is not worth any minor gain in efficiency from a more complex scheduling method. + +### The client MUST re-check all servers every minHeartbeatFrequencyMS + +While an application is waiting to do an operation for which there is no suitable server, a multi-threaded client MUST +re-check all servers very frequently. The slight cost is worthwhile in many scenarios. For example: + +1. A client and a MongoDB server are started simultaneously. +2. The client checks the server before it begins listening, so the check fails. +3. The client waits in the server-selection loop for the topology to change. + +In this state, the client should check the server very frequently, to give it ample opportunity to connect to the server +before timing out in server selection. 
+ +### No knobs + +This spec does not intend to introduce any new configuration options unless absolutely necessary. + +### The client MUST monitor arbiters + +Mongos 2.6 does not monitor arbiters, but it costs little to do so, and in the rare case that all data members are moved +to new hosts in a short time, an arbiter may be the client's last hope to find the new replica set configuration. + +### Only support replica set members running MongoDB 1.6.2 or later + +Replica set members began reporting their setNames in that version. Supporting earlier versions is impractical. + +### TopologyType remains Unknown when an RSGhost is discovered + +If the TopologyType is Unknown and the client receives a hello or legacy hello response from +an[RSGhost](#rsghost-and-rsother), the TopologyType could be set to ReplicaSetNoPrimary. However, an RSGhost does not +report its setName, so the setName would still be unknown. This adds an additional state to the existing list: +"TopologyType ReplicaSetNoPrimary **and** no setName." The additional state adds substantial complexity without any +benefit, so this spec says clients MUST NOT change the TopologyType when an RSGhost is discovered. + +### TopologyType remains Unknown when one of the seeds is a Standalone + +If TopologyType is Unknown and there are multiple seeds, and one of them is discovered to be a standalone, it MUST be +removed. The TopologyType remains Unknown. + +This rule supports the following common scenario: + +1. Servers A and B are in a replica set. +2. A seed list with A and B is stored in a configuration file. +3. An administrator removes B from the set and brings it up as standalone for maintenance, without changing its port + number. +4. The client is initialized with seeds A and B, TopologyType Unknown, and no setName. +5. The first hello or legacy hello response is from B, the standalone. + +What if the client changed TopologyType to Single at this point? 
It would be unable to use the replica set; it would +have to remove A from the TopologyDescription once A's hello or legacy hello response comes. + +The user's intent in this case is clearly to use the replica set, despite the outdated seed list. So this spec requires +clients to remove B from the TopologyDescription and keep the TopologyType as Unknown. Then when A's response arrives, +the client can set its TopologyType to ReplicaSet (with or without primary). + +On the other hand, if there is only one seed and the seed is discovered to be a Standalone, the TopologyType MUST be set +to Single. + +See the "member brought up as standalone" test scenario. + +### Replica set monitoring with and without a primary + +The client strives to fill the "servers" list only with servers that the **primary** said were members of the replica +set, when the client most recently contacted the primary. + +The primary's view of the replica set is authoritative for two reasons: + +1. The primary is never on the minority side of a network partition. During a partition it is the primary's list of + servers the client should use. +2. Since reconfigs must be executed on the primary, the primary is the first to know of them. Reconfigs propagate to + non-primaries eventually, but the client can receive hello or legacy hello responses from non-primaries that reflect + any past state of the replica set. See the "Replica set discovery" test scenario. + +If at any time the client believes there is no primary, the TopologyDescription's type is set to ReplicaSetNoPrimary. +While there is no known primary, the client MUST **add** servers from non-primaries' host lists, but it MUST NOT remove +servers from the TopologyDescription. + +Eventually, when a primary is discovered, any hosts not in the primary's host list are removed. 
+
+### Using electionId and setVersion to detect stale primaries
+
+Replica set members running MongoDB 2.6.10+ or 3.0+ include an integer called "setVersion" and an ObjectId called
+"electionId" in their hello or legacy hello response. Starting with MongoDB 3.2.0, replica sets can use two different
+replication protocol versions; electionIds from one protocol version must not be compared to electionIds from a
+different protocol.
+
+Because protocol version changes require replica set reconfiguration, clients use the tuple (electionId, setVersion) to
+detect stale primaries. The tuple order comparison MUST be checked in the order of electionId followed by setVersion
+since that order of comparison guarantees monotonicity.
+
+The client remembers the greatest electionId and setVersion reported by a primary, and distrusts primaries from older
+electionIds or from the same electionId but with lesser setVersion.
+
+- It compares electionIds as a 12-byte sequence i.e. memory comparison.
+- It compares setVersions as integer values.
+
+This prevents the client from oscillating between the old and new primary during a split-brain period, and helps provide
+read-your-writes consistency with write concern "majority" and read preference "primary".
+
+Prior to MongoDB server version 6.0 drivers had the logic opposite from the server side Replica Set Management logic by
+ordering the tuple by `setVersion` before the `electionId`. In order to remain compatible with backup systems, etc.,
+drivers continue to maintain the reversed logic when connected to a topology that reports a maxWireVersion less than
+`17`. Server versions 6.0 and beyond MUST order the tuple by `electionId` then `setVersion`.
+ +#### Requirements for read-your-writes consistency + +Using (electionId, setVersion) only provides read-your-writes consistency if: + +- The application uses the same MongoClient instance for write-concern "majority" writes and read-preference "primary" + reads, and +- All members use MongoDB 2.6.10+, 3.0.0+ or 3.2.0+ with replication protocol 0 and clocks are *less* than 30 seconds + skewed, or +- All members run MongoDB 3.2.0 and replication protocol 1 and clocks are *less* skewed than the election timeout + (`electionTimeoutMillis`, which defaults to 10 seconds), or +- All members run MongoDB 3.2.1+ and replication protocol 1 (in which case clocks need not be synchronized). + +#### Scenario + +Consider the following situation: + +1. Server A is primary. +2. A network partition isolates A from the set, but the client still sees it. +3. Server B is elected primary. +4. The client discovers that B is primary, does a write-concern "majority" write operation on B and receives + acknowledgment. +5. The client receives a hello or legacy hello response from A, claiming A is still primary. +6. If the client trusts that A is primary, the next read-preference "primary" read sees stale data from A that may *not* + include the write sent to B. + +See [SERVER-17975](https://jira.mongodb.org/browse/SERVER-17975), "Stale reads with WriteConcern Majority and +ReadPreference Primary." + +#### Detecting a stale primary + +To prevent this scenario, the client uses electionId and setVersion to determine which primary was elected last. In this +case, it would not consider "A" a primary, nor read from it because server B will have a greater electionId but the same +setVersion. + +#### Monotonicity + +The electionId is an ObjectId compared bytewise in order. + +(ie. 000000000000000000000001 > 000000000000000000000000, FF0000000000000000000000 > FE0000000000000000000000 etc.) 
+
+In some server versions, it is monotonic with respect to a particular server's system clock, but is not globally
+monotonic across a deployment. However, if inter-server clock skews are small, it can be treated as a monotonic value.
+
+In MongoDB 2.6.10+ (which has [SERVER-13542](https://jira.mongodb.org/browse/SERVER-13542) backported), MongoDB 3.0.0+
+or MongoDB 3.2+ (under replication protocol version 0), the electionId's leading bytes are a server timestamp. As long
+as server clocks are skewed *less* than 30 seconds, electionIds can be reliably compared. (This is precise enough,
+because in replication protocol version 0, servers are designed not to complete more than one election every 30 seconds.
+Elections do not take 30 seconds--they are typically much faster than that--but there is a 30-second cooldown before the
+next election can complete.)
+
+Beginning in MongoDB 3.2.0, under replication protocol version 1, the electionId begins with a timestamp, but the
+cooldown is shorter. As long as inter-server clock skew is *less* than the configured election timeout
+(`electionTimeoutMillis`, which defaults to 10 seconds), then electionIds can be reliably compared.
+
+Beginning in MongoDB 3.2.1, under replication protocol version 1, the electionId is guaranteed monotonic without relying
+on any clock synchronization.
+
+### Using me field to detect seed list members that do not match host names in the replica set configuration
+
+Removal from the topology of seed list members where the "me" property does not match the address used to connect
+prevents clients from being able to select a server, only to fail to re-select that server once the primary has
+responded.
+ +This scenario illustrates the problems that arise if this is NOT done: + +- The client specifies a seed list of A, B, C +- Server A responds as a secondary with hosts D, E, F +- The client executes a query with read preference of secondary, and server A is selected +- Server B responds as a primary with hosts D, E, F. Servers A, B, C are removed, as they don't appear in the primary's + hosts list +- The client iterates the cursor and attempts to execute a getMore against server A. +- Server selection fails because server A is no longer part of the topology. + +With checking for "me" in place, it looks like this instead: + +- The client specifies a seed list of A, B, C +- Server A responds as a secondary with hosts D, E, F, where "me" is D, and so the client adds D, E, F as type "Unknown" + and starts monitoring them, but removes A from the topology. +- The client executes a query with read preference of secondary, and goes into the server selection loop +- Server D responds as a secondary where "me" is D +- Server selection completes by matching D +- The client iterates the cursor and attempts to execute a getMore against server D. +- Server selection completes by matching D. + +### Ignore setVersion unless the server is primary + +It was thought that if all replica set members report a setVersion, and a secondary's response has a higher setVersion +than any seen, that the secondary's host list could be considered as authoritative as the primary's. (See +[Replica set monitoring with and without a primary](#replica-set-monitoring-with-and-without-a-primary).) + +This scenario illustrates the problem with setVersion: + +- We have a replica set with servers A, B, and C. +- Server A is the primary, with setVersion 4. +- An administrator runs replSetReconfig on A, which increments its setVersion to 5. +- The client checks Server A and receives the new config. +- Server A crashes before any secondary receives the new config. +- Server B is elected primary. 
It has the old setVersion 4. +- The client ignores B's version of the config because its setVersion is not greater than 5. + +The client may never correct its view of the topology. + +Even worse: + +- An administrator runs replSetReconfig on Server B, which increments its setVersion to 5. +- Server A restarts. This results in *two* versions of the config, both claiming to be version 5. + +If the client trusted the setVersion in this scenario, it would trust whichever config it received first. + +mongos 2.6 ignores setVersion and only trusts the primary. This spec requires all clients to ignore setVersion from +non-primaries. + +### Use error messages to detect "not master" and "node is recovering" + +When error codes are not available, error messages are checked for the substrings "not master" and "node is recovering". +This is because older server versions returned unstable error codes or no error codes in many circumstances. + +### Other transient errors + +There are other transient errors a server may return, e.g. retryable errors listed in the retryable writes spec. SDAM +does not consider these because they do not imply the connected server should be marked as "Unknown". For example, the +following errors may be returned from a mongos when it cannot route to a shard: + +| Error Name | Error Code | +| --------------- | ---------- | +| HostNotFound | 7 | +| HostUnreachable | 6 | +| NetworkTimeout | 89 | +| SocketException | 9001 | + +When these are returned, the mongos should *not* be marked as "Unknown", since it is more likely an issue with the +shard. + +### Why ignore errors based on CMAP's generation number? + +Using CMAP's \[generation number\](#generation number) solves the following race condition among application threads and +the monitor during error handling: + +1. Two concurrent writes begin on application threads A and B. +2. The server restarts. +3. 
Thread A receives the first non-timeout network error, and the client marks the server Unknown, and clears the + server's pool. +4. The client re-checks the server and marks it Primary. +5. Thread B receives the second non-timeout network error and the client marks the server Unknown again. + +The core issue is that the client processes errors in arbitrary order and may overwrite fresh information about the +server's status with stale information. Using CMAP's generation number avoids the race condition because the duplicate +(or stale) network error can be identified (changes in **bold**): + +1. Two concurrent writes begin on application threads A and B, **with generation 1**. +2. The server restarts. +3. Thread A receives the first non-timeout network error, and the client marks the server Unknown, and clears the + server's pool. **The pool's generation is now 2.** +4. The client re-checks the server and marks it Primary. +5. Thread B receives the second non-timeout network error, **and the client ignores the error because the error + originated from a connection with generation 1.** + +### Why synchronize clearing a server's pool with updating the topology? + +Doing so solves the following race condition among application threads and the monitor during error handling, similar to +the previous example: + +1. A write begins on an application thread. +2. The server restarts. +3. The application thread receives a non-timeout network error. +4. The application thread acquires the lock on the TopologyDescription, marks the Server as Unknown, and releases the + lock. +5. The monitor re-checks the server and marks it Primary and its pool as "ready". +6. Several other application threads enter the WaitQueue of the server's pool. +7. The application thread clears the server's pool, evicting all those new threads from the WaitQueue, causing them to + return errors or to retry. 
Additionally, the pool is now "paused", but the server is considered the Primary, meaning + future operations will be routed to the server and fail until the next heartbeat marks the pool as "ready" again. + +If marking the server as Unknown and clearing its pool were synchronized, then the monitor marking the server as Primary +after its check would happen after the pool was cleared and thus avoid putting it in an inconsistent state. + +### What is the purpose of topologyVersion? + +[topologyVersion](#topologyversion) solves the following race condition among application threads and the monitor when +handling [State Change Errors](#state-change-error): + +1. Two concurrent writes begin on application threads A and B. +2. The primary steps down. +3. Thread A receives the first State Change Error, the client marks the server Unknown. +4. The client re-checks the server and marks it Secondary. +5. Thread B receives a delayed State Change Error and the client marks the server Unknown again. + +The core issue is that the client processes errors in arbitrary order and may overwrite fresh information about the +server's status with stale information. Using topologyVersion avoids the race condition because the duplicate (or stale) +State Change Errors can be identified (changes in **bold**): + +1. Two concurrent writes begin on application threads A and B. + 1. **The primary's ServerDescription.topologyVersion == tv1** +2. The primary steps down **and sets its topologyVersion to tv2**. +3. Thread A receives the first State Change Error **containing tv2**, the client marks the server Unknown (**with + topologyVersion: tv2**). +4. The client re-checks the server and marks it Secondary (**with topologyVersion: tv2**). +5. 
Thread B receives a delayed State Change Error (**with topologyVersion: tv2**) **and the client ignores the error + because the error's topologyVersion (tv2) is not greater than the current ServerDescription (tv2).** + +### Why mark a server Unknown after an auth error? + +The [Authentication spec](../auth/auth.md) requires that when authentication fails on a server, the driver MUST clear +the server's connection pool. Clearing the pool without marking the server Unknown would leave the pool in the "paused" +state while the server is still selectable. When auth fails due to invalid credentials, marking the server Unknown also +serves to rate limit new connections; future operations will need to wait for the server to be rediscovered. + +Note that authentication may fail for a variety of reasons, for example: + +- A network error, or network timeout error may occur. +- The server may return a [State Change Error](#state-change-error). +- The server may return an AuthenticationFailed command error (error code 18) indicating that the provided credentials + are invalid. + +Does this mean that authentication failures due to invalid credentials will manifest as server selection timeout errors? +No, authentication errors are still returned to the application immediately. A subsequent operation will block until the +server is rediscovered and immediately attempt authentication on a new connection. + +### Clients use the hostnames listed in the replica set config, not the seed list + +Very often users have DNS aliases they use in their [seed list](#seed-list) instead of the hostnames in the replica set +config. 
For example, the name "host_alias" might refer to a server also known as "host1", and the URI is: + +``` +mongodb://host_alias/?replicaSet=rs +``` + +When the client connects to "host_alias", its hello or legacy hello response includes the list of hostnames from the +replica set config, which does not include the seed: + +``` +{ + hosts: ["host1:27017", "host2:27017"], + setName: "rs", + ... other hello or legacy hello response fields ... +} +``` + +This spec requires clients to connect to the hostnames listed in the hello or legacy hello response. Furthermore, if the +response is from a primary, the client MUST remove all hostnames not listed. In this case, the client disconnects from +"host_alias" and tries "host1" and "host2". (See [updateRSFromPrimary](#updatersfromprimary).) + +Thus, replica set members must be reachable from the client by the hostnames listed in the replica set config. + +An alternative proposal is for clients to continue using the hostnames in the seed list. It could add new hosts from the +hello or legacy hello response, and where a host is known by two names, the client can deduplicate them using the "me" +field and prefer the name in the seed list. + +This proposal was rejected because it does not support key features of replica sets: failover and zero-downtime +reconfiguration. + +In our example, if "host1" and "host2" are not reachable from the client, the client continues to use "host_alias" only. +If that server goes down or is removed by a replica set reconfig, the client is suddenly unable to reach the replica set +at all: by allowing the client to use the alias, we have hidden the fact that the replica set's failover feature will +not work in a crisis or during a reconfig. + +In conclusion, to support key features of replica sets, we require that the hostnames used in a replica set config are +reachable from the client. + +## Backwards Compatibility + +The Java driver 2.12.1 has a "heartbeatConnectRetryFrequency". 
Since this spec recommends the option be named +"minHeartbeatFrequencyMS", the Java driver must deprecate its old option and rename it minHeartbeatFrequency (for +consistency with its other options which also lack the "MS" suffix). + +## Reference Implementation + +- Java driver 3.x +- PyMongo 3.x +- Perl driver 1.0.0 (in progress) + +## Future Work + +MongoDB is likely to add some of the following features, which will require updates to this spec: + +- Eventually consistent collections (SERVER-2956) +- Mongos discovery (SERVER-1834) +- Put individual databases into maintenance mode, instead of the whole server (SERVER-7826) +- Put setVersion in write-command responses (SERVER-13909) + +## Questions and Answers + +### When does a client see "not writable primary" or "node is recovering"? + +These errors indicate one of these: + +- A write was attempted on an unwritable server (arbiter, secondary, ghost, or recovering). +- A read was attempted on an unreadable server (arbiter, ghost, or recovering) or a read was attempted on a read-only + server without the secondaryOk bit set. +- An operation was attempted on a server that is now shutting down. + +In any case the error is a symptom that a ServerDescription's type no longer reflects reality. + +On MongoDB 4.0 and earlier, a primary closes its connections when it steps down, so in many cases the next operation +causes a network error rather than "not writable primary". The driver can see a "not writable primary" error in the +following scenario: + +1. The client discovers the primary. +2. The primary steps down. +3. Before the client checks the server and discovers the stepdown, the application attempts an operation. +4. The client's connection pool is empty, either because it has never attempted an operation on this server, or because + all connections are in use by other threads. +5. The client creates a connection to the old primary. +6. 
The client attempts to write, or to read without the secondaryOk bit, and receives "not writable primary". + +See ["not writable primary" and "node is recovering"](#not-writable-primary-and-node-is-recovering), and the test +scenario called "parsing 'not writable primary' and 'node is recovering' errors". + +### Why close connections when a node is shutting down? + +When a server shuts down, it will return one of the "node is shutting down" errors for each attempted operation and +eventually will close all connections. Keeping a connection to a server which is shutting down open would only produce +errors on this connection - such a connection will never be usable for any operations. In contrast, when a server 4.2 or +later returns "not writable primary" error the connection may be usable for other operations (such as secondary reads). + +### What's the point of periodic monitoring? + +Why not just wait until a "not writable primary" error or "node is recovering" error informs the client that its +TopologyDescription is wrong? Or wait until server selection fails to find a suitable server, and only scan all servers +then? + +Periodic monitoring accomplishes three objectives: + +- Update each server's type, tags, and [round trip time](#round-trip-time). Read preferences and the mongos selection + algorithm require this information remains up to date. +- Discover new secondaries so that secondary reads are evenly spread. +- Detect incremental changes to the replica set configuration, so that the client remains connected to the set even + while it is migrated to a completely new set of hosts. + +If the application uses some servers very infrequently, monitoring can also proactively detect state changes (primary +stepdown, server becoming unavailable) that would otherwise cause future errors. + +### Why is auto-discovery the preferred default? + +Auto-discovery is most resilient and is therefore preferred. + +### Why is it possible for maxSetVersion to go down? 
+ +`maxElectionId` and `maxSetVersion` are actually considered a pair of values. Drivers MAY consider implementing +comparison in code as a tuple of the two to ensure they're always updated together: + +```typescript +// New tuple old tuple +{ electionId: 2, setVersion: 1 } > { electionId: 1, setVersion: 50 } +``` + +In this scenario, the maxSetVersion goes from 50 to 1, but the maxElectionId is raised to 2. + +## Acknowledgments + +Jeff Yemin's code for the Java driver 2.12, and his patient explanation thereof, is the major inspiration for this spec. +Mathias Stearn's beautiful design for replica set monitoring in mongos 2.6 contributed as well. Bernie Hackett gently +oversaw the specification process. + +## Changelog + +- 2024-05-08: Migrated from reStructuredText to Markdown. + +- 2015-12-17: Require clients to compare (setVersion, electionId) tuples. + +- 2015-10-09: Specify electionID comparison method. + +- 2015-06-16: Added cooldownMS. + +- 2016-05-04: Added link to SDAM monitoring. + +- 2016-07-18: Replace mentions of the "Read Preferences Spec" with "Server\ + Selection Spec", and + "secondaryAcceptableLatencyMS" with "localThresholdMS". + +- 2016-07-21: Updated for Max Staleness support. + +- 2016-08-04: Explain better why clients use the hostnames in RS config, not URI. + +- 2016-08-31: Multi-threaded clients SHOULD use hello or legacy hello replies to\ + update the topology when they + handshake application connections. + +- 2016-10-06: In updateRSWithoutPrimary the hello or legacy hello response's\ + "primary" field should be used to update + the topology description, even if address != me. + +- 2016-10-29: Allow for idleWritePeriodMS to change someday. + +- 2016-11-01: "Unknown" is no longer the default TopologyType, the default is now\ + explicitly unspecified. Update + instructions for setting the initial TopologyType when running the spec tests. + +- 2016-11-21: Revert changes that would allow idleWritePeriodMS to change in the\ + future. 
+ +- 2017-02-28: Update "network error when reading or writing": timeout while\ + connecting does mark a server Unknown, + unlike a timeout while reading or writing. Justify the different behaviors, and also remove obsolete reference to + auto-retry. + +- 2017-06-13: Move socketCheckIntervalMS to Server Selection Spec. + +- 2017-08-01: Parse logicalSessionTimeoutMinutes from hello or legacy hello reply. + +- 2017-08-11: Clearer specification of "incompatible" logic. + +- 2017-09-01: Improved incompatibility error messages. + +- 2018-03-28: Specify that monitoring must not do mechanism negotiation or authentication. + +- 2019-05-29: Renamed InterruptedDueToStepDown to InterruptedDueToReplStateChange + +- 2020-02-13: Drivers must run SDAM flow even when server description is equal to\ + the last one. + +- 2020-03-31: Add topologyVersion to ServerDescription. Add rules for ignoring\ + stale application errors. + +- 2020-05-07: Include error field in ServerDescription equality comparison. + +- 2020-06-08: Clarify reasoning behind how SDAM determines if a topologyVersion is stale. + +- 2020-12-17: Mark the pool for a server as "ready" after performing a successful\ + check. Synchronize pool clearing with + SDAM updates. + +- 2021-01-17: Require clients to compare (electionId, setVersion) tuples. + +- 2021-02-11: Errors encountered during auth are handled by SDAM. Auth errors\ + mark the server Unknown and clear the + pool. + +- 2021-04-12: Adding in behaviour for load balancer mode. + +- 2021-05-03: Require parsing "isWritablePrimary" field in responses. + +- 2021-06-09: Connection pools must be created and eventually marked ready for\ + any server if a direct connection is + used. + +- 2021-06-29: Updated to use modern terminology. + +- 2022-01-19: Add iscryptd and 90th percentile RTT fields to ServerDescription. + +- 2022-07-11: Convert integration tests to the unified format. 
+ +- 2022-09-30: Update `updateRSFromPrimary` to include logic before and after 6.0 servers + +- 2022-10-05: Remove spec front matter, move footnote, and reformat changelog. + +- 2022-11-17: Add minimum RTT tracking and remove 90th percentile RTT. + +- 2024-01-17: Add section on expected client close behaviour + +______________________________________________________________________ + +[^1]: "localThresholdMS" was called "secondaryAcceptableLatencyMS" in the Read Preferences Spec, before it was superseded + by the Server Selection Spec. + +[^2]: [TopologyType remains Unknown when an RSGhost is discovered](#topologytype-remains-unknown-when-an-rsghost-is-discovered). diff --git a/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst b/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst index 2594b090a7..ddd00719ec 100644 --- a/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst +++ b/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst @@ -1,2573 +1,4 @@ -=============================== -Server Discovery And Monitoring -=============================== - -:Status: Accepted -:Minimum Server Version: 2.4 - -.. contents:: - --------- - -Abstract --------- - -This spec defines how a MongoDB client discovers and monitors one or more servers. -It covers monitoring a single server, a set of mongoses, or a replica set. -How does the client determine what type of servers they are? -How does it keep this information up to date? -How does the client find an entire replica set from a seed list, -and how does it respond to a stepdown, election, reconfiguration, or network error? - -All drivers must answer these questions the same. -Or, where platforms' limitations require differences among drivers, -there must be as few answers as possible and each must be clearly explained in this spec. -Even in cases where several answers seem equally good, drivers must agree on one way to do it. 
- -MongoDB users and driver authors benefit from having one way to discover and monitor servers. -Users can substantially understand their driver's behavior without inspecting its code or asking its author. -Driver authors can avoid subtle mistakes -when they take advantage of a design that has been well-considered, reviewed, and tested. - -The server discovery and monitoring method is specified in four sections. -First, a client is `configured`_. -Second, it begins `monitoring`_ by calling `hello or legacy hello`_ on all servers. -(Multi-threaded and asynchronous monitoring is described first, -then single-threaded monitoring.) -Third, as hello or legacy hello responses are received -the client `parses them`_, -and fourth, it `updates its view of the topology`_. - -Finally, this spec describes how `drivers update their topology view -in response to errors`_, -and includes generous implementation notes for driver authors. - -This spec does not describe how a client chooses a server for an operation; -that is the domain of the Server Selection Spec. -But there is a section describing -the `interaction between monitoring and server selection`_. - -There is no discussion of driver architecture and data structures, -nor is there any specification of a user-facing API. -This spec is only concerned with the algorithm for monitoring the server topology. - -Meta ----- - -The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL -NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and -"OPTIONAL" in this document are to be interpreted as described in -`RFC 2119`_. - -.. _RFC 2119: https://www.ietf.org/rfc/rfc2119.txt - -Specification -------------- - -General Requirements -'''''''''''''''''''' - -**Direct connections:** -A client MUST be able to connect to a single server of any type. -This includes querying hidden replica set members, -and connecting to uninitialized members (see `RSGhost`_) in order to run -"replSetInitiate". 
-Setting a read preference MUST NOT be necessary to connect to a secondary. -Of course, -the secondary will reject all operations done with the PRIMARY read preference -because the secondaryOk bit is not set, -but the initial connection itself succeeds. -Drivers MAY allow direct connections to arbiters -(for example, to run administrative commands). - -**Replica sets:** -A client MUST be able to discover an entire replica set from -a seed list containing one or more replica set members. -It MUST be able to continue monitoring the replica set -even when some members go down, -or when reconfigs add and remove members. -A client MUST be able to connect to a replica set -while there is no primary, or the primary is down. - -**Mongos:** -A client MUST be able to connect to a set of mongoses -and monitor their availability and `round trip time`_. -This spec defines how mongoses are discovered and monitored, -but does not define which mongos is selected for a given operation. - -Terms -''''' - -Server -`````` - -A mongod or mongos process, or a load balancer. - -Deployment -`````````` - -One or more servers: -either a standalone, a replica set, or one or more mongoses. - -Topology -```````` - -The state of the deployment: -its type (standalone, replica set, or sharded), -which servers are up, what type of servers they are, -which is primary, and so on. - -Client -`````` - -Driver code responsible for connecting to MongoDB. - -Seed list -````````` - -Server addresses provided to the client in its initial configuration, -for example from the `connection string`_. - -Data-Bearing Server Type -```````````````````````` - -A server type from which a client can receive application data: - -* Mongos -* RSPrimary -* RSSecondary -* Standalone -* LoadBalanced - -Round trip time -``````````````` - -Also known as RTT. - -The client's measurement of the duration of one hello or legacy hello call. 
-The round trip time is used to support the "localThresholdMS" [1]_ -option in the Server Selection Spec. - -.. [1] "localThresholdMS" was called "secondaryAcceptableLatencyMS" in the Read - Preferences Spec, before it was superseded by the Server Selection Spec. - -hello or legacy hello outcome -````````````````````````````` - -The result of an attempt to call the hello or legacy hello command on a server. -It consists of three elements: -a boolean indicating the success or failure of the attempt, -a document containing the command response (or null if it failed), -and the round trip time to execute the command (or null if it failed). - -.. _checks: #check - -check -````` - -The client checks a server by attempting to call hello or legacy hello on it, -and recording the outcome. - -.. _scans: #scan - -scan -```` - -The process of checking all servers in the deployment. - -suitable -```````` - -A server is judged "suitable" for an operation if the client can use it -for a particular operation. -For example, a write requires a standalone, primary, or mongos. -Suitability is fully specified in the `Server Selection Spec -<../server-selection/server-selection.md>`_. - -address -``````` - -The hostname or IP address, and port number, of a MongoDB server. - -network error -````````````` - -An error that occurs while reading from or writing to a network socket. - -network timeout -``````````````` - -A timeout that occurs while reading from or writing to a network socket. - - -minHeartbeatFrequencyMS -``````````````````````` - -Defined in the `Server Monitoring spec`_. This value MUST be 500 ms, and -it MUST NOT be configurable. - -.. _generation number: - -pool generation number -`````````````````````` - -The pool's generation number which starts at 0 and is incremented each time -the pool is cleared. Defined in the `Connection Monitoring and Pooling spec`_. 
- -connection generation number -```````````````````````````` - -The pool's generation number at the time this connection was created. -Defined in the `Connection Monitoring and Pooling spec`_. - -error generation number -``````````````````````` - -The error's generation number is the generation of the connection on which the -application error occurred. Note that when a network error occurs before the -handshake completes then the error's generation number is the generation of -the pool at the time the connection attempt was started. - -.. _State Change Errors: - -State Change Error -`````````````````` - -A server reply document indicating a "not writable primary" or "node is recovering" -error. Starting in MongoDB 4.4 these errors may also include a -`topologyVersion`_ field. - -Data structures -''''''''''''''' - -This spec uses a few data structures -to describe the client's view of the topology. -It must be emphasized that -a driver is free to implement the same behavior -using different data structures. -This spec uses these enums and structs in order to describe driver **behavior**, -not to mandate how a driver represents the topology, -nor to mandate an API. - -Constants -````````` - -clientMinWireVersion and clientMaxWireVersion -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Integers. The wire protocol range supported by the client. - -Enums -````` - -TopologyType -~~~~~~~~~~~~ - -Single, ReplicaSetNoPrimary, ReplicaSetWithPrimary, Sharded, LoadBalanced, or Unknown. - -See `updating the TopologyDescription`_. - -ServerType -~~~~~~~~~~ - -Standalone, Mongos, -PossiblePrimary, RSPrimary, RSSecondary, RSArbiter, RSOther, RSGhost, -LoadBalancer or Unknown. - -See `parsing a hello or legacy hello response`_. - -.. note:: Single-threaded clients use the PossiblePrimary type - to maintain proper `scanning order`_. - Multi-threaded and asynchronous clients do not need this ServerType; - it is synonymous with Unknown. 
- -TopologyDescription -``````````````````` - -The client's representation of everything it knows about the deployment's topology. - -Fields: - -* type: a `TopologyType`_ enum value. See `initial TopologyType`_. -* setName: the replica set name. Default null. -* maxElectionId: an ObjectId or null. The largest electionId ever reported by - a primary. Default null. Part of the (``electionId``, ``setVersion``) tuple. -* maxSetVersion: an integer or null. The largest setVersion ever reported by - a primary. It may not monotonically increase, as electionId takes precedence in ordering - Default null. Part of the (``electionId``, ``setVersion``) tuple. -* servers: a set of ServerDescription instances. - Default contains one server: "localhost:27017", ServerType Unknown. -* stale: a boolean for single-threaded clients, whether the topology must - be re-scanned. - (Not related to maxStalenessSeconds, nor to `stale primaries`_.) -* compatible: a boolean. - False if any server's wire protocol version range - is incompatible with the client's. - Default true. -* compatibilityError: a string. - The error message if "compatible" is false, otherwise null. -* logicalSessionTimeoutMinutes: integer or null. Default null. See - `logical session timeout`_. - -ServerDescription -````````````````` - -The client's view of a single server, -based on the most recent hello or legacy hello outcome. - -Again, drivers may store this information however they choose; -this data structure is defined here -merely to describe the monitoring algorithm. - -Fields: - -* address: the hostname or IP, and the port number, - that the client connects to. - Note that this is **not** the "me" field in the server's hello or legacy hello response, - in the case that the server reports an address different - from the address the client uses. -* (=) error: information about the last error related to this server. Default null. -* roundTripTime: the duration of the hello or legacy hello call. Default null. 
-* minRoundTripTime: the minimum RTT for the server. Default null. -* lastWriteDate: a 64-bit BSON datetime or null. - The "lastWriteDate" from the server's most recent hello or legacy hello response. -* opTime: an opTime or null. - An opaque value representing the position in the oplog of the most recently seen write. Default null. - (Only mongos and shard servers record this field when monitoring - config servers as replica sets, at least until `drivers allow applications to use readConcern "afterOptime". <../max-staleness/max-staleness.md#future-feature-to-support-readconcern-afteroptime>`_) -* (=) type: a `ServerType`_ enum value. Default Unknown. -* (=) minWireVersion, maxWireVersion: - the wire protocol version range supported by the server. - Both default to 0. - `Use min and maxWireVersion only to determine compatibility`_. -* (=) me: The hostname or IP, and the port number, that this server was - configured with in the replica set. Default null. -* (=) hosts, passives, arbiters: Sets of addresses. - This server's opinion of the replica set's members, if any. - These `hostnames are normalized to lower-case`_. - Default empty. - The client `monitors all three types of servers`_ in a replica set. -* (=) tags: map from string to string. Default empty. -* (=) setName: string or null. Default null. -* (=) electionId: an ObjectId, if this is a MongoDB 2.6+ replica set member that - believes it is primary. See `using electionId and setVersion to detect stale primaries`_. - Default null. -* (=) setVersion: integer or null. Default null. -* (=) primary: an address. This server's opinion of who the primary is. - Default null. -* lastUpdateTime: when this server was last checked. Default "infinity ago". -* (=) logicalSessionTimeoutMinutes: integer or null. Default null. -* (=) topologyVersion: A topologyVersion or null. Default null. - The "topologyVersion" from the server's most recent hello or legacy hello response or - `State Change Error`_. 
-* (=) iscryptd: boolean indicating if the server is a - `mongocryptd <../client-side-encryption/client-side-encryption.md#mongocryptd>`_ - server. Default null. - -"Passives" are priority-zero replica set members that cannot become primary. -The client treats them precisely the same as other members. - -Fields marked (=) are used for `Server Description Equality`_ comparison. - -.. _configured: #configuration - -Configuration -''''''''''''' - -No breaking changes -``````````````````` - -This spec does not intend -to require any drivers to make breaking changes regarding -what configuration options are available, -how options are named, -or what combinations of options are allowed. - -Initial TopologyDescription -``````````````````````````` - -The default values for `TopologyDescription`_ fields are described above. -Users may override the defaults as follows: - -Initial Servers -~~~~~~~~~~~~~~~ - -The user MUST be able to set the initial servers list to a `seed list`_ -of one or more addresses. - -The hostname portion of each address MUST be normalized to lower-case. 
- -Initial TopologyType -~~~~~~~~~~~~~~~~~~~~ - -If the ``directConnection`` URI option is specified when a MongoClient is -constructed, the TopologyType must be initialized based on the value of -the ``directConnection`` option and the presence of the ``replicaSet`` option -according to the following table: - -+------------------+-----------------------+-----------------------+ -| directConnection | replicaSet present | Initial TopologyType | -+==================+=======================+=======================+ -| true | no | Single | -+------------------+-----------------------+-----------------------+ -| true | yes | Single | -+------------------+-----------------------+-----------------------+ -| false | no | Unknown | -+------------------+-----------------------+-----------------------+ -| false | yes | ReplicaSetNoPrimary | -+------------------+-----------------------+-----------------------+ - -If the ``directConnection`` option is not specified, newly developed drivers -MUST behave as if it was specified with the false value. - -Since changing the starting topology can reasonably be considered a -backwards-breaking change, existing drivers SHOULD stage implementation -according to semantic versioning guidelines. Specifically, support for the -``directConnection`` URI option can be added in a minor release. -In a subsequent major release, the default starting topology can be changed -to Unknown. Drivers MUST document this in a prior minor release. - -Existing drivers MUST deprecate other URI options, if any, for controlling -topology discovery or specifying the deployment topology. If such a legacy -option is specified and the ``directConnection`` option is also -specified, and the values of the two options are semantically different, -the driver MUST report an error during URI option parsing. - -The API for initializing TopologyType using language-specific native options -is not specified here. Drivers might already have a convention, e.g. 
a single -seed means Single, a setName means ReplicaSetNoPrimary, and a list of seeds -means Unknown. There are variations, however: In the Java driver a single seed -means Single, but a **list** containing one seed means Unknown, so it can -transition to replica-set monitoring if the seed is discovered to be a -replica set member. In contrast, PyMongo requires a non-null setName in order -to begin replica-set monitoring, regardless of the number of seeds. -This spec does not cover language-specific native options that a driver may -provide. - -Initial setName -~~~~~~~~~~~~~~~ - -It is allowed to use ``directConnection=true`` in conjunction with the -``replicaSet`` URI option. The driver must connect in Single topology and -verify that setName matches the specified name, as per -`verifying setName with TopologyType Single`_. - -When a MongoClient is initialized using language-specific native options, -the user MUST be able to set the client's initial replica set name. -A driver MAY require the set name in order to connect to a replica set, -or it MAY be able to discover the replica set name as it connects. - -Allowed configuration combinations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Drivers MUST enforce: - -* TopologyType Single cannot be used with multiple seeds. -* ``directConnection=true`` cannot be used with multiple seeds. -* If setName is not null, only TopologyType ReplicaSetNoPrimary, - and possibly Single, - are allowed. - (See `verifying setName with TopologyType Single`_.) -* ``loadBalanced=true`` cannot be used in conjunction with - ``directConnection=true`` or ``replicaSet`` - -Handling of SRV URIs resolving to single host -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When a driver is given an SRV URI, if the ``directConnection`` URI option -is not specified, and the ``replicaSet`` URI option is not specified, the -driver MUST start in Unknown topology, and follow the rules in the -`TopologyType table`_ for transitioning to other topologies. 
In particular, -the driver MUST NOT use the number of hosts from the initial SRV lookup -to decide what topology to start in. - -heartbeatFrequencyMS -```````````````````` - -The interval between server `checks`_, counted from the end of the previous -check until the beginning of the next one. - -For multi-threaded and asynchronous drivers -it MUST default to 10 seconds and MUST be configurable. -For single-threaded drivers it MUST default to 60 seconds -and MUST be configurable. -It MUST be called heartbeatFrequencyMS -unless this breaks backwards compatibility. - -For both multi- and single-threaded drivers, -the driver MUST NOT permit users to configure it less than minHeartbeatFrequencyMS (500ms). - -(See `heartbeatFrequencyMS defaults to 10 seconds or 60 seconds`_ -and `what's the point of periodic monitoring?`_) - -Client construction -''''''''''''''''''' - -Except for `initial DNS seed list discovery -<../initial-dns-seedlist-discovery/initial-dns-seedlist-discovery.md>`_ -when given a connection string with ``mongodb+srv`` scheme, -the client's constructor MUST NOT do any I/O. -This means that the constructor does not throw an exception -if servers are unavailable: -the topology is not yet known when the constructor returns. -Similarly if a server has an incompatible wire protocol version, -the constructor does not throw. -Instead, all subsequent operations on the client fail -as long as the error persists. - -See `clients do no I/O in the constructor`_ for the justification. - -Multi-threaded and asynchronous client construction -``````````````````````````````````````````````````` - -The constructor MAY start the monitors as background tasks -and return immediately. -Or the monitors MAY be started by some method separate from the constructor; -for example they MAY be started by some "initialize" method (by any name), -or on the first use of the client for an operation. 
- -Single-threaded client construction -``````````````````````````````````` - -Single-threaded clients do no I/O in the constructor. -They MUST `scan`_ the servers on demand, -when the first operation is attempted. - -Client closing -'''''''''''''' - -When a client is closing, before it emits the ``TopologyClosedEvent`` as per the -`Events API `_, -it SHOULD `remove`_ all servers from its ``TopologyDescription`` and set its -``TopologyType`` to ``Unknown``, emitting the corresponding -``TopologyDescriptionChangedEvent``. - -Monitoring -'''''''''' - -See the `Server Monitoring spec`_ for how a driver monitors each server. In -summary, the client monitors each server in the topology. The scope of server -monitoring is to provide the topology with updated ServerDescriptions based on -hello or legacy hello command responses. - -.. _parses them: #parsing-a-hello-or-legacy-hello-response - -Parsing a hello or legacy hello response -'''''''''''''''''''''''''''''''''''''''' - -The client represents its view of each server with a `ServerDescription`_. -Each time the client `checks`_ a server, it MUST replace its description of -that server with a new one if and only if the new ServerDescription's -`topologyVersion`_ is greater than or equal to the current ServerDescription's -`topologyVersion`_. - -(See `Replacing the TopologyDescription`_ for an example implementation.) - -This replacement MUST happen even if the new server description compares equal -to the previous one, in order to keep client-tracked attributes like last -update time and round trip time up to date. - -Drivers MUST be able to handle responses to both ``hello`` and legacy hello -commands. When checking results, drivers MUST first check for the -``isWritablePrimary`` field and fall back to checking for an ``ismaster`` field -if ``isWritablePrimary`` was not found. 
-
-ServerDescriptions are created from hello or legacy hello outcomes as follows:
-
-type
-````
-
-The new ServerDescription's type field is set to a `ServerType`_.
-Note that these states do **not** exactly correspond to
-`replica set member states
-<https://www.mongodb.com/docs/manual/reference/replica-states/>`_.
-For example, some replica set member states like STARTUP and RECOVERING
-are identical from the client's perspective, so they are merged into "RSOther".
-Additionally, states like Standalone and Mongos
-are not replica set member states at all.
-
-+-------------------+---------------------------------------------------------------+
-| State             | Symptoms                                                      |
-+===================+===============================================================+
-| Unknown           | Initial, or after a network error or failed hello or legacy   |
-|                   | hello call, or "ok: 1" not in hello or legacy hello response. |
-+-------------------+---------------------------------------------------------------+
-| Standalone        | No "msg: isdbgrid", no setName, and no "isreplicaset: true".  |
-+-------------------+---------------------------------------------------------------+
-| Mongos            | "msg: isdbgrid".                                              |
-+-------------------+---------------------------------------------------------------+
-| PossiblePrimary   | Not yet checked, but another member thinks it is the primary. |
-+-------------------+---------------------------------------------------------------+
-| RSPrimary         | "isWritablePrimary: true" or "ismaster: true",                |
-|                   | "setName" in response.                                        |
-+-------------------+---------------------------------------------------------------+
-| RSSecondary       | "secondary: true", "setName" in response.                     |
-+-------------------+---------------------------------------------------------------+
-| RSArbiter         | "arbiterOnly: true", "setName" in response.                   |
-+-------------------+---------------------------------------------------------------+
-| RSOther           | "setName" in response, "hidden: true" or not primary,         |
-|                   | secondary, nor arbiter.                                       |
-+-------------------+---------------------------------------------------------------+
-| RSGhost           | "isreplicaset: true" in response.                             |
-+-------------------+---------------------------------------------------------------+
-| LoadBalanced      | "loadBalanced=true" in URI.                                   |
-+-------------------+---------------------------------------------------------------+
-
-A server can transition from any state to any other. For example, an
-administrator could shut down a secondary and bring up a mongos in its place.
-
-.. _RSGhost: #RSGhost-and-RSOther
-
-RSGhost and RSOther
-~~~~~~~~~~~~~~~~~~~
-
-The client MUST monitor replica set members
-even when they cannot be queried.
-These members are in state RSGhost or RSOther.
-
-**RSGhost** members occur in at least three situations:
-
-* briefly during server startup,
-* in an uninitialized replica set,
-* or when the server is shunned (removed from the replica set config).
-
-An RSGhost server has no hosts list nor setName.
-Therefore the client MUST NOT attempt to use its hosts list
-nor check its setName
-(see `JAVA-1161 <https://jira.mongodb.org/browse/JAVA-1161>`_
-or `CSHARP-671 <https://jira.mongodb.org/browse/CSHARP-671>`_.)
-However, the client MUST keep the RSGhost member in its TopologyDescription,
-in case the client's only hope for staying connected to the replica set
-is that this member will transition to a more useful state.
-
-For simplicity, this is the rule:
-any server is an RSGhost that reports "isreplicaset: true".
-
-Non-ghost replica set members have reported their setNames
-since MongoDB 1.6.2.
-See `only support replica set members running MongoDB 1.6.2 or later`_.
-
-.. note:: The Java driver does not have a separate state for RSGhost;
-   it is an RSOther server with no hosts list.
-
-**RSOther** servers may be hidden, starting up, or recovering.
-They cannot be queried, but their hosts lists are useful
-for discovering the current replica set configuration.
-
-If a `hidden member
-<https://www.mongodb.com/docs/manual/core/replica-set-hidden-member/>`_
-is provided as a seed,
-the client can use it to find the primary.
-Since the hidden member does not appear in the primary's host list, -it will be removed once the primary is checked. - -error -````` - -If the client experiences any error when checking a server, -it stores error information in the ServerDescription's error field. - -roundTripTime -````````````` - -Drivers MUST record the server's `round trip time`_ (RTT) after each -successful call to hello or legacy hello. The Server Selection Spec -describes how RTT is averaged and how it is used in server selection. -Drivers MUST also record the server's minimum RTT per -`Server Monitoring (Measuring RTT)`_. - -If a hello or legacy hello call fails, the RTT is not updated. -Furthermore, while a server's type is Unknown its RTT is null, -and if it changes from a known type to Unknown its RTT is set to null. -However, if it changes from one known type to another -(e.g. from RSPrimary to RSSecondary) its RTT is updated normally, -not set to null nor restarted from scratch. - -lastWriteDate and opTime -```````````````````````` - -The hello or legacy hello response of a replica set member running MongoDB 3.4 and later -contains a ``lastWrite`` subdocument with fields ``lastWriteDate`` and ``opTime`` -(`SERVER-8858`_). -If these fields are available, parse them from the hello or legacy hello response, -otherwise set them to null. - -Clients MUST NOT attempt to compensate for the network latency between when the server -generated its hello or legacy hello response and when the client records ``lastUpdateTime``. - -.. _SERVER-8858: https://jira.mongodb.org/browse/SERVER-8858 - -lastUpdateTime -`````````````` - -Clients SHOULD set lastUpdateTime with a monotonic clock. - -Hostnames are normalized to lower-case -`````````````````````````````````````` - -The same as with seeds provided in the initial configuration, -all hostnames in the hello or legacy hello response's "me", "hosts", "passives", and "arbiters" -entries MUST be lower-cased. 
-
-This prevents unnecessary work rediscovering a server
-if a seed "A" is provided and the server
-responds that "a" is in the replica set.
-
-`RFC 4343 <https://tools.ietf.org/html/rfc4343>`_:
-
-  Domain Name System (DNS) names are "case insensitive".
-
-logicalSessionTimeoutMinutes
-````````````````````````````
-
-MongoDB 3.6 and later include a ``logicalSessionTimeoutMinutes`` field if
-logical sessions are enabled in the deployment. Clients MUST check for this
-field and set the ServerDescription's logicalSessionTimeoutMinutes field to this
-value, or to null otherwise.
-
-topologyVersion
-```````````````
-
-MongoDB 4.4 and later include a ``topologyVersion`` field in all hello or legacy hello
-and `State Change Error`_ responses. Clients MUST check for this field and set
-the ServerDescription's topologyVersion field to this value, if present.
-The topologyVersion helps the client and server determine the relative
-freshness of topology information in concurrent messages.
-(See `What is the purpose of topologyVersion?`_)
-
-The topologyVersion is a subdocument with two fields, "processId" and
-"counter":
-
-.. code:: typescript
-
-    {
-        topologyVersion: {processId: <ObjectId>, counter: <int64>},
-        ( ... other fields ...)
-    }
-
-topologyVersion Comparison
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To compare a topologyVersion from a hello or legacy hello or State Change Error
-response to the current ServerDescription's topologyVersion:
-
-#. If the response topologyVersion is unset or the ServerDescription's
-   topologyVersion is null, the client MUST assume the response is more recent.
-#. If the response's topologyVersion.processId is not equal to the
-   ServerDescription's, the client MUST assume the response is more recent.
-#. If the response's topologyVersion.processId is equal to the
-   ServerDescription's, the client MUST use the counter field to determine
-   which topologyVersion is more recent.
-
-See `Replacing the TopologyDescription`_ for an example implementation of
-topologyVersion comparison.
- -serviceId -````````` - -MongoDB 5.0 and later, as well as any mongos-like service, include a ``serviceId`` -field when the service is configured behind a load balancer. - -Other ServerDescription fields -`````````````````````````````` - -Other required fields -defined in the `ServerDescription`_ data structure -are parsed from the hello or legacy hello response in the obvious way. - -.. _updates its view of the topology: - -Server Description Equality -``````````````````````````` - -For the purpose of determining whether to publish SDAM events, two server -descriptions having the same address MUST be considered equal if and only if -the values of `ServerDescription`_ fields marked (=) are respectively equal. - -This specification does not prescribe how to compare server descriptions -with different addresses for equality. - -Updating the TopologyDescription -'''''''''''''''''''''''''''''''' - -Each time the client checks a server, -it processes the outcome (successful or not) -to create a `ServerDescription`_, -and then it processes the ServerDescription to update its `TopologyDescription`_. - -The TopologyDescription's `TopologyType`_ influences -how the ServerDescription is processed. -The following subsection -specifies how the client updates its TopologyDescription -when the TopologyType is Single. -The next subsection treats the other types. - -TopologyType Single -``````````````````` - -The TopologyDescription's type was initialized as Single -and remains Single forever. -There is always one ServerDescription in TopologyDescription.servers. - -Whenever the client checks a server (successfully or not), and regardless of -whether the new server description is equal to the previous server description -as defined in `Server Description Equality`_, -the ServerDescription in TopologyDescription.servers -MUST be replaced with the new ServerDescription. - -.. 
_is compatible: - - -Checking wire protocol compatibility -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A ServerDescription which is not Unknown is incompatible if: - -* minWireVersion > clientMaxWireVersion, or -* maxWireVersion < clientMinWireVersion - -If any ServerDescription is incompatible, the client MUST set the -TopologyDescription's "compatible" field to false and fill out the -TopologyDescription's "compatibilityError" field like so: - -- if ServerDescription.minWireVersion > clientMaxWireVersion: - - "Server at $host:$port requires wire version $minWireVersion, but this version - of $driverName only supports up to $clientMaxWireVersion." - -- if ServerDescription.maxWireVersion < clientMinWireVersion: - - "Server at $host:$port reports wire version $maxWireVersion, but this version - of $driverName requires at least $clientMinWireVersion (MongoDB - $mongoVersion)." - -Replace $mongoVersion with the appropriate MongoDB minor version, for example if -clientMinWireVersion is 2 and it connects to MongoDB 2.4, format the error like: - - "Server at example.com:27017 reports wire version 0, but this version - of My Driver requires at least 2 (MongoDB 2.6)." - -In this second case, the exact required MongoDB version is known and can be -named in the error message, whereas in the first case the implementer does not -know which MongoDB versions will be compatible or incompatible in the future. - -Verifying setName with TopologyType Single -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A client MAY allow the user to supply a setName with an initial TopologyType -of Single. In this case, if the ServerDescription's setName is null or wrong, -the ServerDescription MUST be replaced with a default ServerDescription of -type Unknown. - - -TopologyType LoadBalanced -````````````````````````` - -See the `Load Balancer Specification <../load-balancers/load-balancers.md#server-discovery-logging-and-monitoring>`__ for details. 
- -Other TopologyTypes -``````````````````` - -If the TopologyType is **not** Single, the topology can contain zero or more -servers. The state of topology containing zero servers is terminal -(because servers can only be added if they are reported by a server already -in the topology). A client SHOULD emit a warning if it is constructed -with no seeds in the initial seed list. A client SHOULD emit a warning when, -in the process of updating its topology description, it removes the last -server from the topology. - -Whenever a client completes a hello or legacy hello call, -it creates a new ServerDescription with the proper `ServerType`_. -It replaces the server's previous description in TopologyDescription.servers -with the new one. - -Apply the logic for `checking wire protocol compatibility`_ to each -ServerDescription in the topology. -If any server's wire protocol version range does not overlap with the client's, -the client updates the "compatible" and "compatibilityError" fields -as described above for TopologyType Single. -Otherwise "compatible" is set to true. - -It is possible for a multi-threaded client to receive a hello or legacy hello outcome -from a server after the server has been removed from the TopologyDescription. -For example, a monitor begins checking a server "A", -then a different monitor receives a response from the primary -claiming that "A" has been removed from the replica set, -so the client removes "A" from the TopologyDescription. -Then, the check of server "A" completes. - -In all cases, the client MUST ignore hello or legacy hello outcomes from servers -that are not in the TopologyDescription. - -The following subsections explain in detail what actions the client takes -after replacing the ServerDescription. - -TopologyType table -~~~~~~~~~~~~~~~~~~ - -The new ServerDescription's type is the vertical axis, -and the current TopologyType is the horizontal. 
-Where a ServerType and a TopologyType intersect, -the table shows what action the client takes. - -"no-op" means, -do nothing **after** replacing the server's old description -with the new one. - -.. csv-table:: - :header-rows: 1 - :stub-columns: 1 - - ,TopologyType Unknown,TopologyType Sharded,TopologyType ReplicaSetNoPrimary,TopologyType ReplicaSetWithPrimary - ServerType Unknown,no-op,no-op,no-op,`checkIfHasPrimary`_ - ServerType Standalone,`updateUnknownWithStandalone`_,`remove`_,`remove`_,`remove`_ and `checkIfHasPrimary`_ - ServerType Mongos,Set topology type to Sharded,no-op,`remove`_,`remove`_ and `checkIfHasPrimary`_ - ServerType RSPrimary,Set topology type to ReplicaSetWithPrimary then `updateRSFromPrimary`_,`remove`_,Set topology type to ReplicaSetWithPrimary then `updateRSFromPrimary`_,`updateRSFromPrimary`_ - ServerType RSSecondary,Set topology type to ReplicaSetNoPrimary then `updateRSWithoutPrimary`_,`remove`_,`updateRSWithoutPrimary`_,`updateRSWithPrimaryFromMember`_ - ServerType RSArbiter,Set topology type to ReplicaSetNoPrimary then `updateRSWithoutPrimary`_,`remove`_,`updateRSWithoutPrimary`_,`updateRSWithPrimaryFromMember`_ - ServerType RSOther,Set topology type to ReplicaSetNoPrimary then `updateRSWithoutPrimary`_,`remove`_,`updateRSWithoutPrimary`_,`updateRSWithPrimaryFromMember`_ - ServerType RSGhost,no-op [#]_,`remove`_,no-op,`checkIfHasPrimary`_ - -.. [#] `TopologyType remains Unknown when an RSGhost is discovered`_. - -TopologyType explanations -~~~~~~~~~~~~~~~~~~~~~~~~~ - -This subsection complements the `TopologyType table`_ -with prose explanations of the TopologyTypes (besides Single and LoadBalanced). - -TopologyType Unknown - A starting state. - - **Actions**: - - * If the incoming ServerType is Unknown (that is, the hello or legacy hello call failed), - keep the server in TopologyDescription.servers. - The TopologyType remains Unknown. - * The `TopologyType remains Unknown when an RSGhost is discovered`_, too. 
- * If the type is Standalone, run `updateUnknownWithStandalone`_. - * If the type is Mongos, set the TopologyType to Sharded. - * If the type is RSPrimary, record its setName - and call `updateRSFromPrimary`_. - * If the type is RSSecondary, RSArbiter or RSOther, record its setName, - set the TopologyType to ReplicaSetNoPrimary, - and call `updateRSWithoutPrimary`_. - -TopologyType Sharded - A steady state. Connected to one or more mongoses. - - **Actions**: - - * If the server is Unknown or Mongos, keep it. - * Remove others. - -TopologyType ReplicaSetNoPrimary - A starting state. - The topology is definitely a replica set, - but no primary is known. - - **Actions**: - - * Keep Unknown servers. - * Keep RSGhost servers: they are members of some replica set, - perhaps this one, and may recover. - (See `RSGhost and RSOther`_.) - * Remove any Standalones or Mongoses. - * If the type is RSPrimary call `updateRSFromPrimary`_. - * If the type is RSSecondary, RSArbiter or RSOther, - run `updateRSWithoutPrimary`_. - -TopologyType ReplicaSetWithPrimary - A steady state. The primary is known. - - **Actions**: - - * If the server type is Unknown, keep it, - and run `checkIfHasPrimary`_. - * Keep RSGhost servers: they are members of some replica set, - perhaps this one, and may recover. - (See `RSGhost and RSOther`_.) - Run `checkIfHasPrimary`_. - * Remove any Standalones or Mongoses - and run `checkIfHasPrimary`_. - * If the type is RSPrimary run `updateRSFromPrimary`_. - * If the type is RSSecondary, RSArbiter or RSOther, - run `updateRSWithPrimaryFromMember`_. - -Actions -``````` - -updateUnknownWithStandalone -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This subroutine is executed with the ServerDescription from Standalone -when the TopologyType is Unknown: - -.. 
code-block:: python - - if description.address not in topologyDescription.servers: - return - - if settings.seeds has one seed: - topologyDescription.type = Single - else: - remove this server from topologyDescription and stop monitoring it - -See `TopologyType remains Unknown when one of the seeds is a Standalone`_. - -updateRSWithoutPrimary -~~~~~~~~~~~~~~~~~~~~~~ - -This subroutine is executed -with the ServerDescription from an RSSecondary, RSArbiter, or RSOther -when the TopologyType is ReplicaSetNoPrimary: - -.. code-block:: python - - if description.address not in topologyDescription.servers: - return - - if topologyDescription.setName is null: - topologyDescription.setName = description.setName - - else if topologyDescription.setName != description.setName: - remove this server from topologyDescription and stop monitoring it - return - - for each address in description's "hosts", "passives", and "arbiters": - if address is not in topologyDescription.servers: - add new default ServerDescription of type "Unknown" - begin monitoring the new server - - if description.primary is not null: - find the ServerDescription in topologyDescription.servers whose - address equals description.primary - - if its type is Unknown, change its type to PossiblePrimary - - if description.address != description.me: - remove this server from topologyDescription and stop monitoring it - return - -Unlike `updateRSFromPrimary`_, -this subroutine does **not** remove any servers from the TopologyDescription -based on the list of servers in the "hosts" field of the hello or legacy hello -response. The only server that might be removed is the server itself that the -hello or legacy hello response is from. - -The special handling of description.primary -ensures that a single-threaded client -`scans`_ the possible primary before other members. - -See `replica set monitoring with and without a primary`_. 
- -updateRSWithPrimaryFromMember -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This subroutine is executed with the ServerDescription from -an RSSecondary, RSArbiter, or RSOther when the TopologyType is ReplicaSetWithPrimary: - -.. code-block:: python - - if description.address not in topologyDescription.servers: - # While we were checking this server, another thread heard from the - # primary that this server is not in the replica set. - return - - # SetName is never null here. - if topologyDescription.setName != description.setName: - remove this server from topologyDescription and stop monitoring it - checkIfHasPrimary() - return - - if description.address != description.me: - remove this server from topologyDescription and stop monitoring it - checkIfHasPrimary() - return - - # Had this member been the primary? - if there is no primary in topologyDescription.servers: - topologyDescription.type = ReplicaSetNoPrimary - - if description.primary is not null: - find the ServerDescription in topologyDescription.servers whose - address equals description.primary - - if its type is Unknown, change its type to PossiblePrimary - -The special handling of description.primary -ensures that a single-threaded client -`scans`_ the possible primary before other members. - - -updateRSFromPrimary -~~~~~~~~~~~~~~~~~~~ - -This subroutine is executed with a ServerDescription of type RSPrimary: - -.. code-block:: python - - if serverDescription.address not in topologyDescription.servers: - return - - if topologyDescription.setName is null: - topologyDescription.setName = serverDescription.setName - - else if topologyDescription.setName != serverDescription.setName: - # We found a primary but it doesn't have the setName - # provided by the user or previously discovered. 
- remove this server from topologyDescription and stop monitoring it - checkIfHasPrimary() - return - - # Election ids are ObjectIds, see - # see "Using electionId and setVersion to detect stale primaries" - # for comparison rules. - - if serverDescription.maxWireVersion >= 17: # MongoDB 6.0+ - # Null values for both electionId and setVersion are always considered less than - if serverDescription.electionId > topologyDescription.maxElectionId or ( - serverDescription.electionId == topologyDescription.maxElectionId - and serverDescription.setVersion >= topologyDescription.maxSetVersion - ): - topologyDescription.maxElectionId = serverDescription.electionId - topologyDescription.maxSetVersion = serverDescription.setVersion - else: - # Stale primary. - # replace serverDescription with a default ServerDescription of type "Unknown" - checkIfHasPrimary() - return - else: - # Maintain old comparison rules, namely setVersion is checked before electionId - if serverDescription.setVersion is not null and serverDescription.electionId is not null: - if ( - topologyDescription.maxSetVersion is not null - and topologyDescription.maxElectionId is not null - and ( - topologyDescription.maxSetVersion > serverDescription.setVersion - or ( - topologyDescription.maxSetVersion == serverDescription.setVersion - and topologyDescription.maxElectionId > serverDescription.electionId - ) - ) - ): - # Stale primary. 
- # replace serverDescription with a default ServerDescription of type "Unknown" - checkIfHasPrimary() - return - - topologyDescription.maxElectionId = serverDescription.electionId - - if serverDescription.setVersion is not null and ( - topologyDescription.maxSetVersion is null - or serverDescription.setVersion > topologyDescription.maxSetVersion - ): - topologyDescription.maxSetVersion = serverDescription.setVersion - - - for each server in topologyDescription.servers: - if server.address != serverDescription.address: - if server.type is RSPrimary: - # See note below about invalidating an old primary. - replace the server with a default ServerDescription of type "Unknown" - - for each address in serverDescription's "hosts", "passives", and "arbiters": - if address is not in topologyDescription.servers: - add new default ServerDescription of type "Unknown" - begin monitoring the new server - - for each server in topologyDescription.servers: - if server.address not in serverDescription's "hosts", "passives", or "arbiters": - remove the server and stop monitoring it - - checkIfHasPrimary() - -A note on invalidating the old primary: -when a new primary is discovered, -the client finds the previous primary (there should be none or one) -and replaces its description -with a default ServerDescription of type "Unknown." -A multi-threaded client MUST `request an immediate check`_ for that server as -soon as possible. - -If the old primary server version is 4.0 or earlier, -the client MUST clear its connection pool for the old primary, too: -the connections are all bad because the old primary has closed its sockets. -If the old primary server version is 4.2 or newer, the client MUST NOT -clear its connection pool for the old primary. - -See `replica set monitoring with and without a primary`_. - -If the server is primary with an obsolete electionId or setVersion, it is -likely a stale primary that is going to step down. 
Mark it Unknown and let periodic -monitoring detect when it becomes secondary. See -`using electionId and setVersion to detect stale primaries`_. - -A note on checking "me": Unlike `updateRSWithPrimaryFromMember`, there is no need to remove the server if the address is not equal to -"me": since the server address will not be a member of either "hosts", "passives", or "arbiters", the server will already have been -removed. - -checkIfHasPrimary -~~~~~~~~~~~~~~~~~ - -Set TopologyType to ReplicaSetWithPrimary if there is an RSPrimary -in TopologyDescription.servers, otherwise set it to ReplicaSetNoPrimary. - -For example, if the TopologyType is ReplicaSetWithPrimary -and the client is processing a new ServerDescription of type Unknown, -that could mean the primary just disconnected, -so checkIfHasPrimary must run to check if the TopologyType should become -ReplicaSetNoPrimary. - -Another example is if the client first reaches the primary via its external -IP, but the response's host list includes only internal IPs. -In that case the client adds the primary's internal IP to the -TopologyDescription and begins monitoring it, and removes the external IP. -Right after removing the external IP from the description, -the TopologyType MUST be ReplicaSetNoPrimary, since no primary is -available at this moment. - -remove -~~~~~~ - -Remove the server from TopologyDescription.servers and stop monitoring it. - -In multi-threaded clients, a monitor may be currently checking this server -and may not immediately abort. -Once the check completes, this server's hello or legacy hello outcome MUST be -ignored, and the monitor SHOULD halt. - -Logical Session Timeout -``````````````````````` - -Whenever a client updates the TopologyDescription from a hello or legacy hello response, -it MUST set TopologyDescription.logicalSessionTimeoutMinutes to the smallest -logicalSessionTimeoutMinutes value among ServerDescriptions of all data-bearing -server types. 
If any have a null logicalSessionTimeoutMinutes, -then TopologyDescription.logicalSessionTimeoutMinutes MUST be set to null. - -See the Driver Sessions Spec for the purpose of this value. - -.. _drivers update their topology view in response to errors: - - -Connection Pool Management -'''''''''''''''''''''''''' - -For drivers that support connection pools, after a server check is -completed successfully, if the server is determined to be -`data-bearing `_ -or a -`direct connection `__ -to the server is requested, -and does not already have a connection pool, the driver MUST create -the connection pool for the server. Additionally, if a driver -implements a CMAP compliant connection pool, the server's pool (even -if it already existed) MUST be marked as "ready". See the `Server -Monitoring spec`_ for more information. - -Clearing the connection pool for a server MUST be synchronized with -the update to the corresponding ServerDescription (e.g. by holding the -lock on the TopologyDescription when clearing the pool). This prevents -a possible race between the monitors and application threads. See `Why -synchronize clearing a server's pool with updating the topology?`_ for -more information. - -Error handling -'''''''''''''' - -Network error during server check -````````````````````````````````` - -See error handling in the `Server Monitoring spec`_. - -Application errors -`````````````````` - -When processing a network or command error, clients MUST first check the -error's `generation number`_. If the error's generation number is equal to -the pool's generation number then error handling MUST continue according to -`Network error when reading or writing`_ or -`"not writable primary" and "node is recovering"`_. Otherwise, the error is considered -stale and the client MUST NOT update any topology state. 
-(See `Why ignore errors based on CMAP's generation number?`_) - -Error handling pseudocode -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Application operations can fail in various places, for example: - -- A network error, network timeout, or command error may occur while - establishing a new connection. Establishing a connection includes the - MongoDB handshake and completing authentication (if configured). -- A network error or network timeout may occur while reading or writing to an - established connection. -- A command error may be returned from the server. -- A "writeConcernError" field may be included in the command response. - -Depending on the context, these errors may update SDAM state by marking -the server Unknown and may clear the server's connection pool. Some errors -also require other side effects, like cancelling a check or requesting an -immediate check. Drivers may use the following pseudocode to guide their -implementation: - -.. code-block:: python - - def handleError(error): - address = error.address - topologyVersion = error.topologyVersion - - with client.lock: - # Ignore stale errors based on generation and topologyVersion. - if isStaleError(client.topologyDescription, error) - return - - if isStateChangeError(error): - # Don't mark server unknown in load balanced mode. - if type != LoadBalanced - # Mark the server Unknown - unknown = new ServerDescription(type=Unknown, error=error, topologyVersion=topologyVersion) - onServerDescriptionChanged(unknown, connection pool for server) - if isShutdown(code) or (error was from <4.2): - # the pools must only be cleared while the lock is held. - if type == LoadBalanced: - clear connection pool for serviceId - else: - clear connection pool for server - if multi-threaded: - request immediate check - else: - # Check right now if this is "not writable primary", since it might be a - # useful secondary. If it's "node is recovering" leave it for the - # next full scan. 
- if isNotWritablePrimary(error): - check failing server - elif isNetworkError(error) or (not error.completedHandshake and (isNetworkTimeout(error) or isAuthError(error))): - if type != LoadBalanced - # Mark the server Unknown - unknown = new ServerDescription(type=Unknown, error=error) - onServerDescriptionChanged(unknown, connection pool for server) - clear connection pool for server - else - if serviceId - clear connection pool for serviceId - # Cancel inprogress check - cancel monitor check - - def isStaleError(topologyDescription, error): - currentServer = topologyDescription.servers[server.address] - currentGeneration = currentServer.pool.generation - generation = get connection generation from error - if generation < currentGeneration: - # Stale generation number. - return True - - currentTopologyVersion = currentServer.topologyVersion - # True if the current error's topologyVersion is greater than the server's - # We use >= instead of > because any state change should result in a new topologyVersion - return compareTopologyVersion(currentTopologyVersion, error.commandResponse.get("topologyVersion")) >= 0 - -The following pseudocode checks a response for a "not master" or "node is -recovering" error: - -.. code-block:: python - - recoveringCodes = [11600, 11602, 13436, 189, 91] - notWritablePrimaryCodes = [10107, 13435, 10058] - shutdownCodes = [11600, 91] - - def isRecovering(message, code): - if code: - if code in recoveringCodes: - return true - else: - # if no code, use the error message. - return ("not master or secondary" in message - or "node is recovering" in message) - - def isNotWritablePrimary(message, code): - if code: - if code in notWritablePrimaryCodes: - return true - else: - # if no code, use the error message. 
- if isRecovering(message, None):
- return false
- return ("not master" in message)
-
- def isShutdown(code):
- if code and code in shutdownCodes:
- return true
- return false
-
- def isStateChangeError(error):
- message = error.errmsg
- code = error.code
- return isRecovering(message, code) or isNotWritablePrimary(message, code)
-
- def parseGle(response):
- if "err" in response:
- handleError(CommandError(response, response["err"], response["code"]))
-
- # Parse response to any command besides getLastError.
- def parseCommandResponse(response):
- if not response["ok"]:
- handleError(CommandError(response, response["errmsg"], response["code"]))
- elif response["writeConcernError"]:
- wce = response["writeConcernError"]
- handleError(WriteConcernError(response, wce["errmsg"], wce["code"]))
-
- def parseQueryResponse(response):
- if the "QueryFailure" bit is set in response flags:
- handleError(CommandError(response, response["$err"], response["code"]))
-
-The following sections describe the handling of different classes of
-application errors in detail including network errors, network timeout errors,
-state change errors, and authentication errors. 
- -Network error when reading or writing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To describe how the client responds to network errors during application operations, -we distinguish two phases of connecting to a server and using it for application operations: - -- *Before the handshake completes*: the client establishes a new connection to the server - and completes an initial handshake by calling "hello" or legacy hello and reading the - response, and optionally completing authentication -- *After the handshake completes*: the client uses the established connection for - application operations - -If there is a network error or timeout on the connection before the handshake completes, -the client MUST replace the server's description -with a default ServerDescription of type Unknown when the TopologyType is not -LoadBalanced, and fill the ServerDescription's error field with useful information. - -If there is a network error or timeout on the connection before the handshake completes, -and the TopologyType is LoadBalanced, the client MUST keep the ServerDescription -as LoadBalancer. - -If there is a network timeout on the connection after the handshake completes, -the client MUST NOT mark the server Unknown. -(A timeout may indicate a slow operation on the server, -rather than an unavailable server.) -If, however, there is some other network error on the connection after the -handshake completes, the client MUST replace the server's description -with a default ServerDescription of type Unknown if the TopologyType is not -LoadBalanced, and fill the ServerDescription's error field with useful information, -the same as if an error or timeout occurred before the handshake completed. 
- -When the client marks a server Unknown due to a network error or timeout, -the Unknown ServerDescription MUST be sent through the same process for -`updating the TopologyDescription`_ as if it had been a failed hello or legacy hello outcome -from a server check: for example, if the TopologyType is ReplicaSetWithPrimary -and a write to the RSPrimary server fails because of a network error -(other than timeout), then a new ServerDescription is created for the primary, -with type Unknown, and the client executes the proper subroutine for an -Unknown server when the TopologyType is ReplicaSetWithPrimary: -referring to the table above we see the subroutine is `checkIfHasPrimary`_. -The result is the TopologyType changes to ReplicaSetNoPrimary. -See the test scenario called "Network error writing to primary". - -The client MUST close all idle sockets in its connection pool for the server: -if one socket is bad, it is likely that all are. - -Clients MUST NOT request an immediate check of the server; -since application sockets are used frequently, a network error likely means -the server has just become unavailable, -so an immediate refresh is likely to get a network error, too. - -The server will not remain Unknown forever. -It will be refreshed by the next periodic check or, -if an application operation needs the server sooner than that, -then a re-check will be triggered by the server selection algorithm. - -"not writable primary" and "node is recovering" -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -These errors are detected from a getLastError response, -write command response, or query response. Clients MUST check if the server -error is a "node is recovering" error or a "not writable primary" error. - -If the response includes an error code, it MUST be solely used to determine -if error is a "node is recovering" or "not writable primary" error. 
-Clients MUST match the errors by the numeric error code and not by the code -name, as the code name can change from one server version to the next. - -The following error codes indicate a replica set member is temporarily -unusable. These are called "node is recovering" errors: - -.. list-table:: - :header-rows: 1 - - * - Error Name - - Error Code - * - InterruptedAtShutdown - - 11600 - * - InterruptedDueToReplStateChange - - 11602 - * - NotPrimaryOrSecondary - - 13436 - * - PrimarySteppedDown - - 189 - * - ShutdownInProgress - - 91 - -And the following error codes indicate a "not writable primary" error: - -.. list-table:: - :header-rows: 1 - - * - Error Name - - Error Code - * - NotWritablePrimary - - 10107 - * - NotPrimaryNoSecondaryOk - - 13435 - * - LegacyNotPrimary - - 10058 - -Clients MUST fallback to checking the error message if and only if the -response does not include an error code. The error is considered a "node -is recovering" error if the substrings "node is recovering" or "not master or -secondary" are anywhere in the error message. Otherwise, if the substring "not -master" is in the error message it is a "not writable primary" error. - -Additionally, if the response includes a write concern error, then the code -and message of the write concern error MUST be checked the same way a response -error is checked above. - -Errors contained within the writeErrors field MUST NOT be checked. - -See the test scenario called -"parsing 'not writable primary' and 'node is recovering' errors" -for example response documents. - -When the client sees a "not writable primary" or "node is recovering" error and -the error's `topologyVersion`_ is strictly greater than the current -ServerDescription's topologyVersion it MUST replace the server's description -with a ServerDescription of type Unknown. -Clients MUST store useful information in the new ServerDescription's error -field, including the error message from the server. 
-Clients MUST store the error's `topologyVersion`_ field in the new -ServerDescription if present. -(See `What is the purpose of topologyVersion?`_) - -Multi-threaded and asynchronous clients MUST `request an immediate check`_ -of the server. -Unlike in the "network error" scenario above, -a "not writable primary" or "node is recovering" error means the server is available -but the client is wrong about its type, -thus an immediate re-check is likely to provide useful information. - -For single-threaded clients, in the case of a "not writable primary" or "node is -shutting down" error, the client MUST mark the topology as "stale" so the next -server selection scans all servers. For a "node is recovering" error, -single-threaded clients MUST NOT mark the topology as "stale". If a node is -recovering for some time, an immediate scan may not gain useful information. - -The following subset of "node is recovering" errors is defined to be "node is -shutting down" errors: - -.. list-table:: - :header-rows: 1 - - * - Error Name - - Error Code - * - InterruptedAtShutdown - - 11600 - * - ShutdownInProgress - - 91 - -When handling a "not writable primary" or "node is recovering" error, the client MUST -clear the server's connection pool if and only if the error is -"node is shutting down" or the error originated from server version < 4.2. - -(See `when does a client see "not writable primary" or "node is recovering"?`_, `use -error messages to detect "not master" and "node is recovering"`_, and `other -transient errors`_ and `Why close connections when a node is shutting down?`_.) - -Authentication errors -~~~~~~~~~~~~~~~~~~~~~ - -If the authentication handshake fails for a connection, drivers MUST mark the -server Unknown and clear the server's connection pool if the TopologyType is -not LoadBalanced. 
(See `Why mark a server Unknown after an auth error?`_) - -Monitoring SDAM events -'''''''''''''''''''''' - -The required driver specification for providing lifecycle hooks into server -discovery and monitoring for applications to consume can be found in the -`SDAM Monitoring Specification`_. - -Implementation notes -'''''''''''''''''''' - -This section intends to provide generous guidance to driver authors. -It is complementary to the reference implementations. -Words like "should", "may", and so on are used more casually here. - -See also, the implementation notes in the `Server Monitoring spec`_. - -.. _interaction between monitoring and server selection: - -Multi-threaded or asynchronous server selection -``````````````````````````````````````````````` - -While no suitable server is available for an operation, -`the client MUST re-check all servers every minHeartbeatFrequencyMS`_. -(See `requesting an immediate check`_.) - -Single-threaded server selection -```````````````````````````````` - -When a client that uses `single-threaded monitoring`_ -fails to select a suitable server for any operation, -it `scans`_ the servers, then attempts selection again, -to see if the scan discovered suitable servers. It repeats, waiting -`minHeartbeatFrequencyMS`_ after each scan, until a timeout. - -Documentation -````````````` - -Giant seed lists -~~~~~~~~~~~~~~~~ - -Drivers' manuals should warn against huge seed lists, -since it will slow initialization for single-threaded clients -and generate load for multi-threaded and asynchronous drivers. - -.. _implementation notes for multi-threaded clients: - -Multi-threaded -`````````````` - -.. 
_use min and maxWireVersion only to determine compatibility: - -Warning about the maxWireVersion from a monitor's hello or legacy hello response -```````````````````````````````````````````````````````````````````````````````` - -Clients consult some fields from a server's hello or legacy hello response -to decide how to communicate with it: - -* maxWireVersion -* maxBsonObjectSize -* maxMessageSizeBytes -* maxWriteBatchSize - -It is tempting to take these values -from the last hello or legacy hello response a *monitor* received -and store them in the ServerDescription, but this is an anti-pattern. -Multi-threaded and asynchronous clients that do so -are prone to several classes of race, for example: - -* Setup: A MongoDB 3.0 Standalone with authentication enabled, - the client must log in with SCRAM-SHA-1. -* The monitor thread discovers the server - and stores maxWireVersion on the ServerDescription -* An application thread wants a socket, selects the Standalone, - and is about to check the maxWireVersion on its ServerDescription when... -* The monitor thread gets disconnected from server and marks it Unknown, - with default maxWireVersion of 0. -* The application thread resumes, creates a socket, - and attempts to log in using MONGODB-CR, - since maxWireVersion is *now* reported as 0. -* Authentication fails, the server requires SCRAM-SHA-1. - -Better to call hello or legacy hello for each new socket, as required by the `Auth Spec -<../auth/auth.md>`_, -and use the hello or legacy hello response associated with that socket -for maxWireVersion, maxBsonObjectSize, etc.: -all the fields required to correctly communicate with the server. - -The hello or legacy hello responses received by monitors determine if the topology -as a whole `is compatible`_ with the driver, -and which servers are suitable for selection. -The monitors' responses should not be used to determine how to format -wire protocol messages to the servers. 
- -Immutable data -~~~~~~~~~~~~~~ - -Multi-threaded drivers should treat -ServerDescriptions and -TopologyDescriptions as immutable: -the client replaces them, rather than modifying them, -in response to new information about the topology. -Thus readers of these data structures -can simply acquire a reference to the current one -and read it, without holding a lock that would block a monitor -from making further updates. - -Process one hello or legacy hello outcome at a time -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Although servers are checked in parallel, -the function that actually creates the new TopologyDescription -should be synchronized so only one thread can run it at a time. - -.. _onServerDescriptionChanged: - -Replacing the TopologyDescription -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Drivers may use the following pseudocode to guide their -implementation. The client object has a lock and a condition -variable. It uses the lock to ensure that only one new -ServerDescription is processed at a time, and it must be acquired -before invoking this function. Once the client has taken the lock it -must do no I/O:: - - def onServerDescriptionChanged(server, pool): - # "server" is the new ServerDescription. - # "pool" is the pool associated with the server - - if server.address not in client.topologyDescription.servers: - # The server was once in the topologyDescription, otherwise - # we wouldn't have been monitoring it, but an intervening - # state-change removed it. E.g., we got a host list from - # the primary that didn't include this server. - return - - newTopologyDescription = client.topologyDescription.copy() - - # Ignore this update if the current topologyVersion is greater than - # the new ServerDescription's. - if isStaleServerDescription(td, server): - return - - # Replace server's previous description. 
- address = server.address
- newTopologyDescription.servers[address] = server
-
- # for drivers that implement CMAP, mark the connection pool as ready after a successful check
- if (server.type in (Mongos, RSPrimary, RSSecondary, Standalone, LoadBalanced))
- or (server.type != Unknown and newTopologyDescription.type == Single):
- pool.ready()
-
- take any additional actions,
- depending on the TopologyType and server...
-
- # Replace TopologyDescription and notify waiters.
- client.topologyDescription = newTopologyDescription
- client.condition.notifyAll()
-
- def compareTopologyVersion(tv1, tv2):
- """Return -1 if tv1<tv2, 0 if tv1==tv2, 1 if tv1>tv2"""
- if tv1 is None or tv2 is None:
- # Assume greater.
- return -1
- pid1 = tv1['processId']
- pid2 = tv2['processId']
- if pid1 == pid2:
- counter1 = tv1['counter']
- counter2 = tv2['counter']
- if counter1 == counter2:
- return 0
- elif counter1 < counter2:
- return -1
- else:
- return 1
- else:
- # Assume greater.
- return -1
-
- def isStaleServerDescription(topologyDescription, server):
- # True if the new ServerDescription's topologyVersion is greater than
- # or equal to the current server's.
- currentServer = topologyDescription.servers[server.address]
- currentTopologyVersion = currentServer.topologyVersion
- return compareTopologyVersion(currentTopologyVersion, server.topologyVersion) > 0
-
-.. https://github.com/mongodb/mongo-java-driver/blob/5fb47a3bf86c56ed949ce49258a351773f716d07/src/main/com/mongodb/BaseCluster.java#L160
-
-Notifying the condition unblocks threads waiting in the server-selection loop
-for a suitable server to be discovered.
-
-.. note::
- The Java driver uses a CountDownLatch instead of a condition variable,
- and it atomically swaps the old and new CountDownLatches
- so it does not need "client.lock".
- It does, however, use a lock to ensure that only one thread runs
- onServerDescriptionChanged at a time. 
- -Rationale ---------- - -Clients do no I/O in the constructor -'''''''''''''''''''''''''''''''''''' - -An alternative proposal was to distinguish between "discovery" and "monitoring". -When discovery begins, the client checks all its seeds, -and discovery is complete once all servers have been checked, -or after some maximum time. -Application operations cannot proceed until discovery is complete. - -If the discovery phase is distinct, -then single- and multi-threaded drivers -could accomplish discovery in the constructor, -and throw an exception from the constructor -if the deployment is unavailable or misconfigured. -This is consistent with prior behavior for many drivers. -It will surprise some users that the constructor now succeeds, -but all operations fail. - -Similarly for misconfigured seed lists: -the client may discover a mix of mongoses and standalones, -or find multiple replica set names. -It may surprise some users that the constructor succeeds -and the client attempts to proceed with a compatible subset of the deployment. - -Nevertheless, this spec prohibits I/O in the constructor -for the following reasons: - -Common case -``````````` - -In the common case, the deployment is available and usable. -This spec favors allowing operations to proceed as soon as possible -in the common case, -at the cost of surprising behavior in uncommon cases. - -Simplicity -`````````` - -It is simpler to omit a special discovery phase -and treat all server `checks`_ the same. - -Consistency -``````````` - -Asynchronous clients cannot do I/O in a constructor, -so it is consistent to prohibit I/O in other clients' constructors as well. - -Restarts -```````` - -If clients can be constructed when the deployment is in some states -but not in other states, -it leads to an unfortunate scenario: -When the deployment is passing through a strange state, -long-running clients may keep working, -but any clients restarted during this period fail. 
- -Say an administrator changes one replica set member's setName. -Clients that are already constructed remove the bad member and stay usable, -but if any client is restarted its constructor fails. -Web servers that dynamically adjust their process pools -will show particularly undesirable behavior. - -heartbeatFrequencyMS defaults to 10 seconds or 60 seconds -''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -Many drivers have different values. The time has come to standardize. -Lacking a rigorous methodology for calculating the best frequency, -this spec chooses 10 seconds for multi-threaded or asynchronous drivers -because some already use that value. - -Because scanning has a greater impact on -the performance of single-threaded drivers, -they MUST default to a longer frequency (60 seconds). - -An alternative is to check servers less and less frequently -the longer they remain unchanged. -This idea is rejected because -it is a goal of this spec to answer questions about monitoring such as, - -* "How rapidly can I rotate a replica set to a new set of hosts?" -* "How soon after I add a secondary will query load be rebalanced?" -* "How soon will a client notice a change in round trip time, or tags?" - -Having a constant monitoring frequency allows us to answer these questions -simply and definitively. -Losing the ability to answer these questions is not worth -any minor gain in efficiency from a more complex scheduling method. - -The client MUST re-check all servers every minHeartbeatFrequencyMS -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -While an application is waiting to do an operation -for which there is no suitable server, -a multi-threaded client MUST re-check all servers very frequently. -The slight cost is worthwhile in many scenarios. For example: - -#. A client and a MongoDB server are started simultaneously. -#. The client checks the server before it begins listening, - so the check fails. -#. 
The client waits in the server-selection loop for the topology to change. - -In this state, the client should check the server very frequently, -to give it ample opportunity to connect to the server before -timing out in server selection. - -No knobs -'''''''' - -This spec does not intend to introduce any new configuration options -unless absolutely necessary. - -.. _monitors all three types of servers: - -The client MUST monitor arbiters -'''''''''''''''''''''''''''''''' - -Mongos 2.6 does not monitor arbiters, -but it costs little to do so, -and in the rare case that -all data members are moved to new hosts in a short time, -an arbiter may be the client's last hope -to find the new replica set configuration. - -Only support replica set members running MongoDB 1.6.2 or later -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -Replica set members began reporting their setNames in that version. -Supporting earlier versions is impractical. - -TopologyType remains Unknown when an RSGhost is discovered -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -If the TopologyType is Unknown and the client receives a hello or legacy hello response -from an`RSGhost`_, the TopologyType could be set to ReplicaSetNoPrimary. -However, an RSGhost does not report its setName, -so the setName would still be unknown. -This adds an additional state to the existing list: -"TopologyType ReplicaSetNoPrimary **and** no setName." -The additional state adds substantial complexity -without any benefit, so this spec says clients MUST NOT change the TopologyType -when an RSGhost is discovered. - -TopologyType remains Unknown when one of the seeds is a Standalone -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -If TopologyType is Unknown and there are multiple seeds, -and one of them is discovered to be a standalone, -it MUST be removed. -The TopologyType remains Unknown. - -This rule supports the following common scenario: - -#. 
Servers A and B are in a replica set. -#. A seed list with A and B is stored in a configuration file. -#. An administrator removes B from the set and brings it up as standalone - for maintenance, without changing its port number. -#. The client is initialized with seeds A and B, - TopologyType Unknown, and no setName. -#. The first hello or legacy hello response is from B, the standalone. - -What if the client changed TopologyType to Single at this point? -It would be unable to use the replica set; it would have to remove A -from the TopologyDescription once A's hello or legacy hello response comes. - -The user's intent in this case is clearly to use the replica set, -despite the outdated seed list. So this spec requires clients to remove B -from the TopologyDescription and keep the TopologyType as Unknown. -Then when A's response arrives, the client can set its TopologyType -to ReplicaSet (with or without primary). - -On the other hand, -if there is only one seed and the seed is discovered to be a Standalone, -the TopologyType MUST be set to Single. - -See the "member brought up as standalone" test scenario. - - -Replica set monitoring with and without a primary -''''''''''''''''''''''''''''''''''''''''''''''''' - -The client strives to fill the "servers" list -only with servers that the **primary** -said were members of the replica set, -when the client most recently contacted the primary. - -The primary's view of the replica set is authoritative for two reasons: - -1. The primary is never on the minority side of a network partition. - During a partition it is the primary's list of - servers the client should use. -2. Since reconfigs must be executed on the primary, - the primary is the first to know of them. - Reconfigs propagate to non-primaries eventually, - but the client can receive hello or legacy hello responses from non-primaries - that reflect any past state of the replica set. - See the "Replica set discovery" test scenario. 
-
-If at any time the client believes there is no primary,
-the TopologyDescription's type is set to ReplicaSetNoPrimary.
-While there is no known primary,
-the client MUST **add** servers from non-primaries' host lists,
-but it MUST NOT remove servers from the TopologyDescription.
-
-Eventually, when a primary is discovered, any hosts not in the primary's host
-list are removed.
-
-.. _stale primaries:
-
-Using electionId and setVersion to detect stale primaries
-'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
-
-Replica set members running MongoDB 2.6.10+ or 3.0+ include an integer called
-"setVersion" and an ObjectId called
-"electionId" in their hello or legacy hello response.
-Starting with MongoDB 3.2.0, replica sets can use two different replication
-protocol versions; electionIds from one protocol version must not be compared
-to electionIds from a different protocol.
-
-Because protocol version changes require replica set reconfiguration,
-clients use the tuple (electionId, setVersion) to detect stale primaries.
-The tuple order comparison MUST be checked in the order of electionId followed
-by setVersion since that order of comparison guarantees monotonicity.
-
-The client remembers the greatest electionId and setVersion reported by a primary,
-and distrusts primaries from older electionIds or from the same electionId
-but with lesser setVersion.
-
-- It compares electionIds as 12-byte sequence i.e. memory comparison.
-- It compares setVersions as integer values.
-
-This prevents the client from oscillating
-between the old and new primary during a split-brain period,
-and helps provide read-your-writes consistency with write concern "majority"
-and read preference "primary".
-
-Prior to MongoDB server version 6.0 drivers had the logic opposite from
-the server side Replica Set Management logic by ordering the tuple by ``setVersion`` before the ``electionId``.
-In order to remain compatible with backup systems, etc. 
drivers continue to
-maintain the reversed logic when connected to a topology that reports a maxWireVersion less than ``17``.
-Server versions 6.0 and beyond MUST order the tuple by ``electionId`` then ``setVersion``.
-
-Requirements for read-your-writes consistency
-`````````````````````````````````````````````
-
-Using (electionId, setVersion) only provides read-your-writes consistency if:
-
-* The application uses the same MongoClient instance for write-concern
- "majority" writes and read-preference "primary" reads, and
-* All members use MongoDB 2.6.10+, 3.0.0+ or 3.2.0+ with replication protocol 0
- and clocks are *less* than 30 seconds skewed, or
-* All members run MongoDB 3.2.0 and replication protocol 1
- and clocks are *less* skewed than the election timeout
- (`electionTimeoutMillis`, which defaults to 10 seconds), or
-* All members run MongoDB 3.2.1+ and replication protocol 1
- (in which case clocks need not be synchronized).
-
-Scenario
-````````
-
-Consider the following situation:
-
-1. Server A is primary.
-2. A network partition isolates A from the set, but the client still sees it.
-3. Server B is elected primary.
-4. The client discovers that B is primary, does a write-concern "majority"
- write operation on B and receives acknowledgment.
-5. The client receives a hello or legacy hello response from A, claiming A is still primary.
-6. If the client trusts that A is primary, the next read-preference "primary"
- read sees stale data from A that may *not* include the write sent to B.
-
-See `SERVER-17975 <https://jira.mongodb.org/browse/SERVER-17975>`_, "Stale
-reads with WriteConcern Majority and ReadPreference Primary."
-
-Detecting a stale primary
-`````````````````````````
-
-To prevent this scenario, the client uses electionId and setVersion to
-determine which primary was elected last. In this case, it would not consider
-"A" a primary, nor read from it because server B will have a greater electionId
-but the same setVersion. 
- -Monotonicity -```````````` - -The electionId is an ObjectId compared bytewise in order. - -(ie. 000000000000000000000001 > 000000000000000000000000, FF0000000000000000000000 > FE0000000000000000000000 etc.) - -In some server versions, it is monotonic with respect -to a particular servers' system clock, but is not globally monotonic across -a deployment. However, if inter-server clock skews are small, it can be -treated as a monotonic value. - -In MongoDB 2.6.10+ (which has `SERVER-13542 `_ backported), -MongoDB 3.0.0+ or MongoDB 3.2+ (under replication protocol version 0), -the electionId's leading bytes are a server timestamp. -As long as server clocks are skewed *less* than 30 seconds, -electionIds can be reliably compared. -(This is precise enough, because in replication protocol version 0, servers -are designed not to complete more than one election every 30 seconds. -Elections do not take 30 seconds--they are typically much faster than that--but -there is a 30-second cooldown before the next election can complete.) - -Beginning in MongoDB 3.2.0, under replication protocol version 1, -the electionId begins with a timestamp, but -the cooldown is shorter. As long as inter-server clock skew is *less* than -the configured election timeout (`electionTimeoutMillis`, which defaults to -10 seconds), then electionIds can be reliably compared. - -Beginning in MongoDB 3.2.1, under replication protocol version 1, -the electionId is guaranteed monotonic -without relying on any clock synchronization. - -Using me field to detect seed list members that do not match host names in the replica set configuration -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -Removal from the topology of seed list members where the "me" property does not -match the address used to connect prevents clients from being able to select -a server, only to fail to re-select that server once the primary has responded. 
- -This scenario illustrates the problems that arise if this is NOT done: - -* The client specifies a seed list of A, B, C -* Server A responds as a secondary with hosts D, E, F -* The client executes a query with read preference of secondary, and server A - is selected -* Server B responds as a primary with hosts D, E, F. Servers A, B, C are - removed, as they don't appear in the primary's hosts list -* The client iterates the cursor and attempts to execute a getMore against - server A. -* Server selection fails because server A is no longer part of the topology. - -With checking for "me" in place, it looks like this instead: - -* The client specifies a seed list of A, B, C -* Server A responds as a secondary with hosts D, E, F, where "me" is D, and so - the client adds D, E, F as type "Unknown" and starts monitoring them, but - removes A from the topology. -* The client executes a query with read preference of secondary, and goes into - the server selection loop -* Server D responds as a secondary where "me" is D -* Server selection completes by matching D -* The client iterates the cursor and attempts to execute a getMore against - server D. -* Server selection completes by matching D. - -Ignore setVersion unless the server is primary -'''''''''''''''''''''''''''''''''''''''''''''' - -It was thought that if all replica set members report a setVersion, -and a secondary's response has a higher setVersion than any seen, -that the secondary's host list could be considered as authoritative -as the primary's. (See `Replica set monitoring with and without a primary`_.) - -This scenario illustrates the problem with setVersion: - -* We have a replica set with servers A, B, and C. -* Server A is the primary, with setVersion 4. -* An administrator runs replSetReconfig on A, - which increments its setVersion to 5. -* The client checks Server A and receives the new config. -* Server A crashes before any secondary receives the new config. -* Server B is elected primary. 
It has the old setVersion 4. -* The client ignores B's version of the config - because its setVersion is not greater than 5. - -The client may never correct its view of the topology. - -Even worse: - -* An administrator runs replSetReconfig - on Server B, which increments its setVersion to 5. -* Server A restarts. - This results in *two* versions of the config, - both claiming to be version 5. - -If the client trusted the setVersion in this scenario, -it would trust whichever config it received first. - -mongos 2.6 ignores setVersion and only trusts the primary. -This spec requires all clients to ignore setVersion from non-primaries. - -Use error messages to detect "not master" and "node is recovering" -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -When error codes are not available, error messages are checked for the -substrings "not master" and "node is recovering". This is because older server -versions returned unstable error codes or no error codes in many -circumstances. - -Other transient errors -'''''''''''''''''''''' - -There are other transient errors a server may return, e.g. retryable errors -listed in the retryable writes spec. SDAM does not consider these because they -do not imply the connected server should be marked as "Unknown". For example, -the following errors may be returned from a mongos when it cannot route to a -shard: - -.. list-table:: - :header-rows: 1 - - * - Error Name - - Error Code - * - HostNotFound - - 7 - * - HostUnreachable - - 6 - * - NetworkTimeout - - 89 - * - SocketException - - 9001 - -When these are returned, the mongos should *not* be marked as "Unknown", since -it is more likely an issue with the shard. - -Why ignore errors based on CMAP's generation number? -'''''''''''''''''''''''''''''''''''''''''''''''''''' - -Using CMAP's `generation number`_ solves the following race condition among -application threads and the monitor during error handling: - -#. 
Two concurrent writes begin on application threads A and B. -#. The server restarts. -#. Thread A receives the first non-timeout network error, and the client - marks the server Unknown, and clears the server's pool. -#. The client re-checks the server and marks it Primary. -#. Thread B receives the second non-timeout network error and the client - marks the server Unknown again. - -The core issue is that the client processes errors in arbitrary order -and may overwrite fresh information about the server's status with stale -information. Using CMAP's generation number avoids the race condition because -the duplicate (or stale) network error can be identified (changes in -**bold**): - -#. Two concurrent writes begin on application threads A and B, **with - generation 1**. -#. The server restarts. -#. Thread A receives the first non-timeout network error, and the client - marks the server Unknown, and clears the server's pool. **The - pool's generation is now 2.** -#. The client re-checks the server and marks it Primary. -#. Thread B receives the second non-timeout network error, **and the - client ignores the error because the error originated from a - connection with generation 1.** - -Why synchronize clearing a server's pool with updating the topology? -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -Doing so solves the following race condition among application threads -and the monitor during error handling, similar to the previous -example: - -#. A write begins on an application thread. -#. The server restarts. -#. The application thread receives a non-timeout network error. -#. The application thread acquires the lock on the TopologyDescription, marks - the Server as Unknown, and releases the lock. -#. The monitor re-checks the server and marks it Primary and its pool - as "ready". -#. Several other application threads enter the WaitQueue of the - server's pool. -#. 
The application thread clears the server's pool, evicting all those - new threads from the WaitQueue, causing them to return errors or to - retry. Additionally, the pool is now "paused", but the server is - considered the Primary, meaning future operations will be routed to - the server and fail until the next heartbeat marks the pool as - "ready" again. - -If marking the server as Unknown and clearing its pool were -synchronized, then the monitor marking the server as Primary after its -check would happen after the pool was cleared and thus avoid putting -it an inconsistent state. - -What is the purpose of topologyVersion? -''''''''''''''''''''''''''''''''''''''' - -`topologyVersion`_ solves the following race condition among application -threads and the monitor when handling `State Change Errors`_: - -#. Two concurrent writes begin on application threads A and B. -#. The primary steps down. -#. Thread A receives the first State Change Error, the client marks the - server Unknown. -#. The client re-checks the server and marks it Secondary. -#. Thread B receives a delayed State Change Error and the client marks - the server Unknown again. - -The core issue is that the client processes errors in arbitrary order -and may overwrite fresh information about the server's status with stale -information. Using topologyVersion avoids the race condition because the -duplicate (or stale) State Change Errors can be identified (changes in -**bold**): - -#. Two concurrent writes begin on application threads A and B. - - a. **The primary's ServerDescription.topologyVersion == tv1** - -#. The primary steps down **and sets its topologyVersion to tv2**. -#. Thread A receives the first State Change Error **containing tv2**, - the client marks the server Unknown (**with topologyVersion: tv2**). -#. The client re-checks the server and marks it Secondary (**with - topologyVersion: tv2**). -#. 
Thread B receives a delayed State Change Error (**with - topologyVersion: tv2**) **and the client ignores the error because - the error's topologyVersion (tv2) is not greater than the current - ServerDescription (tv2).** - -Why mark a server Unknown after an auth error? -'''''''''''''''''''''''''''''''''''''''''''''' - -The `Authentication spec`_ requires that when authentication fails on a server, -the driver MUST clear the server's connection pool. Clearing the pool without -marking the server Unknown would leave the pool in the "paused" state while -the server is still selectable. When auth fails due to invalid credentials, -marking the server Unknown also serves to rate limit new connections; -future operations will need to wait for the server to be rediscovered. - -Note that authentication may fail for a variety of reasons, for example: - -- A network error, or network timeout error may occur. -- The server may return a `State Change Error`_. -- The server may return a AuthenticationFailed command error (error code 18) - indicating that the provided credentials are invalid. - -Does this mean that authentication failures due to invalid credentials will -manifest as server selection timeout errors? No, authentication errors are -still returned to the application immediately. A subsequent operation will -block until the server is rediscovered and immediately attempt -authentication on a new connection. - -Clients use the hostnames listed in the replica set config, not the seed list -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -Very often users have DNS aliases they use in their `seed list`_ instead of -the hostnames in the replica set config. 
For example, the name "host_alias" -might refer to a server also known as "host1", and the URI is:: - - mongodb://host_alias/?replicaSet=rs - -When the client connects to "host_alias", its hello or legacy hello response includes the -list of hostnames from the replica set config, which does not include the seed:: - - { - hosts: ["host1:27017", "host2:27017"], - setName: "rs", - ... other hello or legacy hello response fields ... - } - -This spec requires clients to connect to the hostnames listed in the hello or legacy hello -response. Furthermore, if the response is from a primary, the client MUST -remove all hostnames not listed. In this case, the client disconnects from -"host_alias" and tries "host1" and "host2". (See `updateRSFromPrimary`_.) - -Thus, replica set members must be reachable from the client by the hostnames -listed in the replica set config. - -An alternative proposal is for clients to continue using the hostnames in the -seed list. It could add new hosts from the hello or legacy hello response, and where a host -is known by two names, the client can deduplicate them using the "me" field and -prefer the name in the seed list. - -This proposal was rejected because it does not support key features of replica -sets: failover and zero-downtime reconfiguration. - -In our example, if "host1" and "host2" are not reachable from the client, the -client continues to use "host_alias" only. If that server goes down or is -removed by a replica set reconfig, the client is suddenly unable to reach the -replica set at all: by allowing the client to use the alias, we have hidden the -fact that the replica set's failover feature will not work in a crisis or -during a reconfig. - -In conclusion, to support key features of replica sets, we require that the -hostnames used in a replica set config are reachable from the client. - -Backwards Compatibility ------------------------ - -The Java driver 2.12.1 has a "heartbeatConnectRetryFrequency". 
-Since this spec recommends the option be named "minHeartbeatFrequencyMS", -the Java driver must deprecate its old option -and rename it minHeartbeatFrequency (for consistency with its other options -which also lack the "MS" suffix). - -Reference Implementation ------------------------- - -* Java driver 3.x -* PyMongo 3.x -* Perl driver 1.0.0 (in progress) - -Future Work ------------ - -MongoDB is likely to add some of the following features, -which will require updates to this spec: - -* Eventually consistent collections (SERVER-2956) -* Mongos discovery (SERVER-1834) -* Put individual databases into maintenance mode, - instead of the whole server (SERVER-7826) -* Put setVersion in write-command responses (SERVER-13909) - -Questions and Answers ---------------------- - -When does a client see "not writable primary" or "node is recovering"? -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -These errors indicate one of these: - -* A write was attempted on an unwritable server - (arbiter, secondary, ghost, or recovering). -* A read was attempted on an unreadable server - (arbiter, ghost, or recovering) - or a read was attempted on a read-only server without the secondaryOk bit set. -* An operation was attempted on a server that is now shutting down. - -In any case the error is a symptom that -a ServerDescription's type no longer reflects reality. - -On MongoDB 4.0 and earlier, a primary closes its connections when it steps -down, so in many cases the next operation causes a network error -rather than "not writable primary". -The driver can see a "not writable primary" error in the following scenario: - -#. The client discovers the primary. -#. The primary steps down. -#. Before the client checks the server and discovers the stepdown, - the application attempts an operation. -#. The client's connection pool is empty, - either because it has - never attempted an operation on this server, - or because all connections are in use by other threads. 
-#. The client creates a connection to the old primary. -#. The client attempts to write, or to read without the secondaryOk bit, - and receives "not writable primary". - -See `"not writable primary" and "node is recovering"`_, -and the test scenario called -"parsing 'not writable primary' and 'node is recovering' errors". - -Why close connections when a node is shutting down? -''''''''''''''''''''''''''''''''''''''''''''''''''' - -When a server shuts down, it will return one of the "node is shutting down" -errors for each attempted operation and eventually will close all connections. -Keeping a connection to a server which is shutting down open would only -produce errors on this connection - such a connection will never be usable for -any operations. In contrast, when a server 4.2 or later returns "not writable primary" -error the connection may be usable for other operations (such as secondary reads). - -What's the point of periodic monitoring? -'''''''''''''''''''''''''''''''''''''''' - -Why not just wait until a "not writable primary" error or -"node is recovering" error informs the client that its -TopologyDescription is wrong? Or wait until server selection -fails to find a suitable server, and only scan all servers then? - -Periodic monitoring accomplishes three objectives: - -* Update each server's type, tags, and `round trip time`_. - Read preferences and the mongos selection algorithm - require this information remains up to date. -* Discover new secondaries so that secondary reads are evenly spread. -* Detect incremental changes to the replica set configuration, - so that the client remains connected to the set - even while it is migrated to a completely new set of hosts. - -If the application uses some servers very infrequently, -monitoring can also proactively detect state changes -(primary stepdown, server becoming unavailable) -that would otherwise cause future errors. - -Why is auto-discovery the preferred default? 
-'''''''''''''''''''''''''''''''''''''''''''' - -Auto-discovery is most resilient and is therefore preferred. - -Why is it possible for maxSetVersion to go down? -'''''''''''''''''''''''''''''''''''''''''''''''' - -``maxElectionId`` and ``maxSetVersion`` are actually considered a pair of values -Drivers MAY consider implementing comparison in code as a tuple of the two to ensure their always updated together: - -.. code:: typescript - - // New tuple old tuple - { electionId: 2, setVersion: 1 } > { electionId: 1, setVersion: 50 } - -In this scenario, the maxSetVersion goes from 50 to 1, but the maxElectionId is raised to 2. - -Acknowledgments ---------------- - -Jeff Yemin's code for the Java driver 2.12, -and his patient explanation thereof, -is the major inspiration for this spec. -Mathias Stearn's beautiful design for replica set monitoring in mongos 2.6 -contributed as well. -Bernie Hackett gently oversaw the specification process. - -Changelog ---------- - -:2015-12-17: Require clients to compare (setVersion, electionId) tuples. -:2015-10-09: Specify electionID comparison method. -:2015-06-16: Added cooldownMS. -:2016-05-04: Added link to SDAM monitoring. -:2016-07-18: Replace mentions of the "Read Preferences Spec" with "Server - Selection Spec", and "secondaryAcceptableLatencyMS" with - "localThresholdMS". -:2016-07-21: Updated for Max Staleness support. -:2016-08-04: Explain better why clients use the hostnames in RS config, not URI. -:2016-08-31: Multi-threaded clients SHOULD use hello or legacy hello replies to - update the topology when they handshake application connections. -:2016-10-06: In updateRSWithoutPrimary the hello or legacy hello response's - "primary" field should be used to update the topology description, - even if address != me. -:2016-10-29: Allow for idleWritePeriodMS to change someday. -:2016-11-01: "Unknown" is no longer the default TopologyType, the default is now - explicitly unspecified. 
Update instructions for setting the initial - TopologyType when running the spec tests. -:2016-11-21: Revert changes that would allow idleWritePeriodMS to change in the - future. -:2017-02-28: Update "network error when reading or writing": timeout while - connecting does mark a server Unknown, unlike a timeout while - reading or writing. Justify the different behaviors, and also - remove obsolete reference to auto-retry. -:2017-06-13: Move socketCheckIntervalMS to Server Selection Spec. -:2017-08-01: Parse logicalSessionTimeoutMinutes from hello or legacy hello reply. -:2017-08-11: Clearer specification of "incompatible" logic. -:2017-09-01: Improved incompatibility error messages. -:2018-03-28: Specify that monitoring must not do mechanism negotiation or authentication. -:2019-05-29: Renamed InterruptedDueToStepDown to InterruptedDueToReplStateChange -:2020-02-13: Drivers must run SDAM flow even when server description is equal to - the last one. -:2020-03-31: Add topologyVersion to ServerDescription. Add rules for ignoring - stale application errors. -:2020-05-07: Include error field in ServerDescription equality comparison. -:2020-06-08: Clarify reasoning behind how SDAM determines if a topologyVersion is stale. -:2020-12-17: Mark the pool for a server as "ready" after performing a successful - check. Synchronize pool clearing with SDAM updates. -:2021-01-17: Require clients to compare (electionId, setVersion) tuples. -:2021-02-11: Errors encountered during auth are handled by SDAM. Auth errors - mark the server Unknown and clear the pool. -:2021-04-12: Adding in behaviour for load balancer mode. -:2021-05-03: Require parsing "isWritablePrimary" field in responses. -:2021-06-09: Connection pools must be created and eventually marked ready for - any server if a direct connection is used. -:2021-06-29: Updated to use modern terminology. -:2022-01-19: Add iscryptd and 90th percentile RTT fields to ServerDescription. 
-:2022-07-11: Convert integration tests to the unified format. -:2022-09-30: Update ``updateRSFromPrimary`` to include logic before and after 6.0 servers -:2022-10-05: Remove spec front matter, move footnote, and reformat changelog. -:2022-11-17: Add minimum RTT tracking and remove 90th percentile RTT. -:2024-01-17: Add section on expected client close behaviour - ----- - -.. Section for links. - -.. _hello or legacy hello: /source/mongodb-handshake/handshake.rst#terms -.. _connection string: https://www.mongodb.com/docs/manual/reference/connection-string/ -.. _Server Monitoring spec: server-monitoring.rst -.. _SDAM Monitoring Specification: server-discovery-and-monitoring-logging-and-monitoring.rst -.. _requesting an immediate check: server-monitoring.rst#requesting-an-immediate-check -.. _request an immediate check: server-monitoring.rst#requesting-an-immediate-check -.. _scanning order: server-monitoring.rst#scanning-order -.. _clients update the topology from each handshake: server-monitoring.rst#clients-update-the-topology-from-each-handshake -.. _single-threaded monitoring: server-monitoring.rst#single-threaded-monitoring -.. _Connection Monitoring and Pooling spec: ../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md -.. _CMAP spec: ../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md -.. _Authentication spec: ../auth/auth.md -.. _Server Monitoring (Measuring RTT): server-monitoring.rst#measuring-rtt + This specification has been converted to Markdown and renamed to + `server-discovery-and-monitoring.md `_. 
diff --git a/source/server-discovery-and-monitoring/server-monitoring.md b/source/server-discovery-and-monitoring/server-monitoring.md index 2e30583a4d..b03a960166 100644 --- a/source/server-discovery-and-monitoring/server-monitoring.md +++ b/source/server-discovery-and-monitoring/server-monitoring.md @@ -581,7 +581,7 @@ class Monitor(Thread): wait() def setUpConnection(): - # Take the mutex to avoid a data race becauase this code writes to the connection field and a concurrent + # Take the mutex to avoid a data race because this code writes to the connection field and a concurrent # cancelCheck call could be reading from it. with lock: # Server API versioning implies that the server supports hello. @@ -874,7 +874,7 @@ above mentioned concerns. In the streaming protocol, clients use the hello or legacy hello command on a dedicated connection to measure a server's RTT. However, errors encountered when running the RTT command MUST NOT mark a server Unknown. We reached this decision -because the dedicate RTT connection does not come from a connection pool and thus does not have a generation number +because the dedicated RTT connection does not come from a connection pool and thus does not have a generation number associated with it. Without a generation number we cannot handle errors from the RTT command without introducing race conditions. Introducing such a generation number would add complexity to this design without much benefit. It is safe to ignore these errors because the Monitor will soon discover the server's state regardless (either through an updated diff --git a/source/server-discovery-and-monitoring/tests/README.md b/source/server-discovery-and-monitoring/tests/README.md index a96bcb6490..23f4fe00ab 100644 --- a/source/server-discovery-and-monitoring/tests/README.md +++ b/source/server-discovery-and-monitoring/tests/README.md @@ -193,10 +193,9 @@ Run the following test(s) on MongoDB 4.4+. 6. Wait for the server's RTT to exceed 250ms. 
Eventually the average RTT should also exceed 500ms but we use 250ms to speed up the test. Note that the - [Server Description Equality](/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#server-description-equality) - rule means that ServerDescriptionChangedEvents will not be published. This test may need to use a driver specific - helper to obtain the latest RTT instead. If the RTT does not exceed 250ms after 10 seconds, consider the test - failed. + [Server Description Equality](../server-discovery-and-monitoring.md#server-description-equality) rule means that + ServerDescriptionChangedEvents will not be published. This test may need to use a driver specific helper to obtain + the latest RTT instead. If the RTT does not exceed 250ms after 10 seconds, consider the test failed. 7. Disable the failpoint: diff --git a/source/server-selection/server-selection.md b/source/server-selection/server-selection.md index 644c67c806..063d67af68 100644 --- a/source/server-selection/server-selection.md +++ b/source/server-selection/server-selection.md @@ -78,7 +78,7 @@ An OP_QUERY operation targeting the '$cmd' collection namespace. A driver connection mode that sends all database operations to a single server without regard for type. - + **Eligible**\ Describes candidate servers that also meet the criteria specified by the `tag_sets` and @@ -228,7 +228,7 @@ once after server selection fails, then either selects a server or raises an err The serverSelectionTryOnce option MUST be true by default. 
If it is set false, then the driver repeatedly searches for an appropriate server until the selection process times out (pausing -[minHeartbeatFrequencyMS](https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#minheartbeatfrequencyms) +[minHeartbeatFrequencyMS](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#minheartbeatfrequencyms) between attempts, as required by the [Server Discovery and Monitoring](https://github.com/mongodb/specifications/tree/master/source/server-discovery-and-monitoring) spec). @@ -249,8 +249,8 @@ for a ["try once" mode](#try-once-mode).) #### heartbeatFrequencyMS This controls when topology updates are scheduled. See -[heartbeatFrequencyMS](https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#heartbeatfrequencyms) -in the +[heartbeatFrequencyMS](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#heartbeatfrequencyms) in +the [Server Discovery and Monitoring](https://github.com/mongodb/specifications/tree/master/source/server-discovery-and-monitoring) spec for details. @@ -1576,8 +1576,6 @@ maxStalenessSeconds first, then tag_sets, and select Node 2. ## Changelog -- 2024-02-07: Migrated from reStructuredText to Markdown. - - 2015-06-26: Updated single-threaded selection logic with "stale" and serverSelectionTryOnce. - 2015-08-10: Updated single-threaded selection logic to ensure a scan always\ @@ -1656,5 +1654,7 @@ maxStalenessSeconds first, then tag_sets, and select Node 2. - 2023-08-26: Add list of deprioritized servers for sharded cluster topology. +- 2024-02-07: Migrated from reStructuredText to Markdown. + [^1]: mongos 3.4 refuses to connect to mongods with maxWireVersion \< 5, so it does no additional wire version checks related to maxStalenessSeconds. 
diff --git a/source/server-selection/tests/logging/operation-id.json b/source/server-selection/tests/logging/operation-id.json index 276e4b8d6d..6cdbcb3f5a 100644 --- a/source/server-selection/tests/logging/operation-id.json +++ b/source/server-selection/tests/logging/operation-id.json @@ -47,6 +47,9 @@ } } ], + "_yamlAnchors": { + "namespace": "logging-tests.server-selection" + }, "tests": [ { "description": "Successful bulkWrite operation: log messages have operationIds", @@ -224,6 +227,190 @@ ] } ] + }, + { + "description": "Successful client bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "logging-tests.server-selection", + "document": { + "x": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + } + ] + } + ] + }, + { + "description": "Failed client bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + 
"appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "bulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "logging-tests.server-selection", + "document": { + "x": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + } + ] + } + ] } ] } diff --git a/source/server-selection/tests/logging/operation-id.yml b/source/server-selection/tests/logging/operation-id.yml index 430e81a58b..24e48f9410 100644 --- a/source/server-selection/tests/logging/operation-id.yml +++ b/source/server-selection/tests/logging/operation-id.yml @@ -30,6 +30,9 @@ createEntities: - client: id: &failPointClient failPointClient +_yamlAnchors: + namespace: &namespace "logging-tests.server-selection" + tests: - description: "Successful bulkWrite operation: log messages have operationIds" operations: @@ -122,3 +125,97 @@ tests: operationId: { $$type: [int, long] } operation: insert + - description: "Successful client bulkWrite operation: log messages have operationIds" + 
runOnRequirements: + - minServerVersion: "8.0" # required for bulkWrite command + operations: + # ensure we've discovered the server so it is immediately available + # and no extra "waiting for suitable server" messages are emitted. + # expected topology events reflect initial server discovery and server connect event. + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + topologyDescriptionChangedEvent: {} + count: 2 + - name: clientBulkWrite + object: *client + arguments: + models: + - insertOne: + namespace: *namespace + document: { x: 1 } + expectLogMessages: + - client: *client + messages: + - level: debug + component: serverSelection + data: + message: "Server selection started" + operationId: { $$type: [int, long] } + operation: bulkWrite + - level: debug + component: serverSelection + data: + message: "Server selection succeeded" + operationId: { $$type: [int, long] } + operation: bulkWrite + + - description: "Failed client bulkWrite operation: log messages have operationIds" + runOnRequirements: + - minServerVersion: "8.0" # required for bulkWrite command + operations: + # fail all hello/legacy hello commands for the main client. + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: alwaysOn + data: + failCommands: ["hello", "ismaster"] + appName: *appName + closeConnection: true + # wait until we've marked the server unknown due + # to a failed heartbeat. 
+ - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + serverDescriptionChangedEvent: + newDescription: + type: Unknown + count: 1 + - name: bulkWrite + object: *client + arguments: + models: + - insertOne: + namespace: *namespace + document: { x: 1 } + expectError: + isClientError: true # server selection timeout + expectLogMessages: + - client: *client + messages: + - level: debug + component: serverSelection + data: + message: "Server selection started" + operationId: { $$type: [int, long] } + operation: bulkWrite + - level: info + component: serverSelection + data: + message: "Waiting for suitable server to become available" + operationId: { $$type: [int, long] } + operation: bulkWrite + - level: debug + component: serverSelection + data: + message: "Server selection failed" + operationId: { $$type: [int, long] } + operation: bulkWrite diff --git a/source/server_write_commands.rst b/source/server_write_commands.rst index 0626d54829..237f022a25 100644 --- a/source/server_write_commands.rst +++ b/source/server_write_commands.rst @@ -464,13 +464,21 @@ response would look, had the request asked for that write concern. FAQ --- -Can a driver still use the OP_INSERT, OP_DELETE, OP_UPDATE? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Why are ``_id`` values generated client-side by default for new documents? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Yes, a 2.6 server will still support those. But it is unlikely that a 2.8 server would. Of course, when talking to older servers, the usual op codes will continue working the same. An older server is one that reports ``hello.maxWireVersion`` to be less than 2 or does not include the field. +Though drivers may expose configuration options to prevent this behavior, by default a new ``ObjectId`` value will be created client-side before an ``insert`` operation. 
+ +This design decision primarily stems from the fact that MongoDB is a distributed database and the typical unique auto-incrementing scalar value most RDBMS' use for generating a primary key would not be robust enough, necessitating the need for a more robust data type (``ObjectId`` in this case). These ``_id`` values can be generated either on the client or the server, however when done client-side a new document's ``_id`` value is immediately available for use without the need for a network round trip. + +Prior to MongoDB 3.6, an ``insert`` operation would use the ``OP_INSERT`` opcode of the wire protocol to send the operation, and retrieve the results subsequently with a ``getLastError`` command. If client-side ``_id`` values were omitted, this command response wouldn't contain the server-created ``_id`` values for new documents. Following MongoDB 3.6 when all commands would be issued using the ``OP_MSG`` wire protocol opcode (``insert`` included), the response to the command still wouldn't contain the ``_id`` values for inserted documents. + + +Can a driver still use the ``OP_INSERT``, ``OP_DELETE``, ``OP_UPDATE``? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The `legacy opcodes were removed in MongoDB 6.0 `_. As of MongoDB 3.6 these opcodes were superseded by `OP_MSG `_, however all server versions up until 6.0 continued to support the legacy opcodes. -The rationale here is that we may choose to divert all the write traffic to the new -protocol. (This depends on the having the overhead to issue a batch with one item very low.) Can an application still issue requests with write concerns {w: 0}? 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -494,10 +502,12 @@ Yes but as of 2.6 the existing getLastError behavior is supported for backward c Changelog --------- -:2014-05-14: First public version +:2024-06-04: Add FAQ entry outlining client-side _id value generation + Update FAQ to indicate legacy opcodes were removed +:2022-10-05: Revise spec front matter and reformat changelog. +:2022-07-25: Remove outdated value for ``maxWriteBatchSize`` +:2021-04-22: Updated to use hello command :2014-05-15: Removed text related to bulk operations; see the Bulk API spec for bulk details. Clarified some paragraphs; re-ordered the response field sections. -:2021-04-22: Updated to use hello command -:2022-07-25: Remove outdated value for ``maxWriteBatchSize`` -:2022-10-05: Revise spec front matter and reformat changelog. +:2014-05-14: First public version diff --git a/source/sessions/driver-sessions.md b/source/sessions/driver-sessions.md new file mode 100644 index 0000000000..c277494694 --- /dev/null +++ b/source/sessions/driver-sessions.md @@ -0,0 +1,939 @@ +# Sessions Specification + +- Status: Accepted +- Minimum Server Version: 3.6 + +______________________________________________________________________ + +## Abstract + +Version 3.6 of the server introduces the concept of logical sessions for clients. A session is an abstract concept that +represents a set of sequential operations executed by an application that are related in some way. This specification is +limited to how applications start and end sessions. Other specifications define various ways in which sessions are used +(e.g. causally consistent reads, retryable writes, or transactions). + +This specification also discusses how drivers participate in distributing the cluster time throughout a deployment, a +process known as "gossipping the cluster time". 
While gossipping the cluster time is somewhat orthogonal to sessions, +any driver that implements sessions MUST also implement gossipping the cluster time, so it is included in this +specification. + +## Definitions + +### META + +The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and +"OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt). + +### Terms + +**ClientSession**\ +The driver object representing a client session and the operations that can be performed on it. +Depending on the language a driver is written in this might be an interface or a class. See also `ServerSession`. + +**Deployment**\ +A set of servers that are all part of a single MongoDB cluster. We avoid the word "cluster" because some +people interpret "cluster" to mean "sharded cluster". + +**Explicit session**\ +A session that was started explicitly by the application by calling `startSession` and passed as +an argument to an operation. + +**MongoClient**\ +The root object of a driver's API. MAY be named differently in some drivers. + +**Implicit session**\ +A session that was started implicitly by the driver because the application called an operation +without providing an explicit session. + +**MongoCollection**\ +The driver object representing a collection and the operations that can be performed on it. MAY be +named differently in some drivers. + +**MongoDatabase**\ +The driver object representing a database and the operations that can be performed on it. MAY be +named differently in some drivers. + +**ServerSession**\ +The driver object representing a server session. This type is an implementation detail and does not +need to be public. See also `ClientSession`. + +**Server session ID**\ +A server session ID is a token used to identify a particular server session. 
A driver can ask the +server for a session ID using the `startSession` command or it can generate one locally (see Generating a Session ID +locally). + +**Session**\ +A session is an abstract concept that represents a set of sequential operations executed by an application +that are related in some way. Other specifications define the various ways in which operations can be related, but +examples include causally consistent reads and retryable writes. + +**Topology**\ +The current configuration and state of a deployment. + +**Unacknowledged writes**\ +Unacknowledged writes are write operations that are sent to the server without waiting for a +reply acknowledging the write. See the "When using unacknowledged writes" section below for information on how +unacknowledged writes interact with sessions. + +**Network error**\ +Any network exception writing to or reading from a socket (e.g. a socket timeout or error). + +## Specification + +Drivers currently have no concept of a session. The driver API will be expanded to provide a way for applications to +start and end sessions and to execute operations in the context of a session. The goal is to expand the API in a way +that introduces no backward breaking changes. Existing applications that don't use sessions don't need to be changed, +and new applications that don't need sessions can continue to be written using the existing API. + +To use sessions an application will call new (or overloaded) methods that take a session parameter. + +## Naming variations + +This specification defines names for new methods and types. To the extent possible, these names SHOULD be used by +drivers. However, where a driver and/or language's naming conventions differ, those naming conventions SHOULD be used. +For example, a driver might name a method `StartSession` or `start_session` instead of `startSession`, or might name a +type `client_session` instead of `ClientSession`. 
+ +## High level summary of the API changes for sessions + +This section is just a high level summary of the new API. Details are provided further on. + +Applications start a new session like this: + +```typescript +options = new SessionOptions(/* various settings */); +session = client.startSession(options); +``` + +The `SessionOptions` will be individually defined in several other specifications. It is expected that the set of +`SessionOptions` will grow over time as sessions are used for new purposes. + +Applications use a session by passing it as an argument to operation methods. For example: + +```typescript +collection.InsertOne(session /* etc. */) +collection.UpdateOne(session /* etc. */) +``` + +Applications end a session like this: + +```typescript +session.endSession() +``` + +This specification does not deal with multi-document transactions, which are covered in +[their own specification](../transactions/transactions.md). + +## MongoClient changes + +`MongoClient` interface summary + +```java +class SessionOptions { + // various other options as defined in other specifications +} + +interface MongoClient { + ClientSession startSession(SessionOptions options); + // other existing members of MongoClient +} +``` + +Each new member is documented below. + +While it is not part of the public API, `MongoClient` also needs a private (or internal) `clusterTime` member +(containing either a BSON document or null) to record the highest `clusterTime` observed in a deployment (as described +below in [Gossipping the cluster time](#gossipping-the-cluster-time)). + +### startSession + +The `startSession` method starts a new `ClientSession` with the provided options. + +It MUST NOT be possible to change the options provided to `startSession` after `startSession` has been called. This can +be accomplished by making the `SessionOptions` class immutable or using some equivalent mechanism that is idiomatic for +your language. 
+ +It is valid to call `startSession` with no options set. This will result in a `ClientSession` that has no effect on the +operations performed in the context of that session, other than to include a session ID in commands sent to the server. + +The `SessionOptions` MAY be a strongly typed class in some drivers, or MAY be a loosely typed dictionary in other +drivers. Drivers MUST define `SessionOptions` in such a way that new options can be added in a backward compatible way +(it is acceptable for backward compatibility to be at the source level). + +A `ClientSession` MUST be associated with a `ServerSession` at the time `startSession` is called. As an implementation +optimization drivers MUST reuse `ServerSession` instances across multiple `ClientSession` instances subject to the rule +that a server session MUST NOT be used by two `ClientSession` instances at the same time (see the Server Session Pool +section). Additionally, a `ClientSession` may only ever be associated with one `ServerSession` for its lifetime. + +Drivers MUST NOT check for session support in `startSession`. Instead, if sessions are not supported, the error MUST be +reported the first time the session is used for an operation (See +[How to Tell Whether a Connection Supports Sessions](#how-to-tell-whether-a-connection-supports-sessions)). + +### Explicit vs implicit sessions + +An explicit session is one started explicitly by the application by calling `startSession`. An implicit session is one +started implicitly by the driver because the application called an operation without providing an explicit session. +Internally, a driver must be able to distinguish between explicit and implicit sessions, but no public API for this is +necessary because an application will never see an implicit session. 
+
+The motivation for starting an implicit session for all methods that don't take an explicit session parameter is to make
+sure that all commands that are sent to the server are tagged with a session ID. This improves the ability of an
+operations team to monitor (and kill if necessary) long running operations. Tagging an operation with a session ID is
+especially useful if a deployment wide operation needs to be killed.
+
+### Authentication
+
+When using authentication, using a session requires that only a single user be authenticated. Drivers that still support
+authenticating multiple users at once MAY continue to do so, but MUST NOT allow sessions to be used under such
+circumstances.
+
+If `startSession` is called when multiple users are authenticated drivers MUST raise an error with the error message
+"Cannot call startSession when multiple users are authenticated."
+
+If a driver allows authentication to be changed on the fly (presumably few still do) the driver MUST either prevent
+`ClientSession` instances from being used with a connection that doesn't have matching authentication or MUST return an
+error if such use is attempted.
+
+## ClientSession
+
+`ClientSession` instances are not thread safe or fork safe. They can only be used by one thread or process at a time.
+
+Drivers MUST document the thread-safety and fork-safety limitations of sessions. Drivers MUST NOT attempt to detect
+simultaneous use by multiple threads or processes (see Q&A for the rationale).
+
+ClientSession interface summary:
+
+```java
+interface ClientSession {
+    MongoClient client;
+    Optional clusterTime;
+    SessionOptions options;
+    BsonDocument sessionId;
+
+    void advanceClusterTime(BsonDocument clusterTime);
+    void endSession();
+}
+```
+
+While it is not part of the public API, a `ClientSession` also has a private (or internal) reference to a
+`ServerSession`.
+
+Each member is documented below.
+ +### client + +This property returns the `MongoClient` that was used to start this `ClientSession`. + +### clusterTime + +This property returns the most recent cluster time seen by this session. If no operations have been executed using this +session this value will be null unless `advanceClusterTime` has been called. This value will also be null when a cluster +does not report cluster times. + +When a driver is gossiping the cluster time it should send the more recent `clusterTime` of the `ClientSession` and the +`MongoClient`. + +### options + +This property returns the `SessionOptions` that were used to start this `ClientSession`. + +### sessionId + +This property returns the session ID of this session. Note that since `ServerSessions` are pooled, different +`ClientSession` instances can have the same session ID, but never at the same time. + +### advanceClusterTime + +This method advances the `clusterTime` for a session. If the new `clusterTime` is greater than the session's current +`clusterTime` then the session's `clusterTime` MUST be advanced to the new `clusterTime`. If the new `clusterTime` is +less than or equal to the session's current `clusterTime` then the session's `clusterTime` MUST NOT be changed. + +This method MUST NOT advance the `clusterTime` in `MongoClient` because we have no way of verifying that the supplied +`clusterTime` is valid. If the `clusterTime` in `MongoClient` were set to an invalid value all future operations with +this `MongoClient` would result in the server returning an error. The `clusterTime` in `MongoClient` should only be +advanced with a `$clusterTime` received directly from a server. + +### endSession + +This method ends a `ClientSession`. + +In languages that have idiomatic ways of disposing of resources, drivers SHOULD support that in addition to or instead +of `endSession`. 
For example, in the .NET driver `ClientSession` would implement `IDisposable` and the application could +choose to call `session.Dispose` or put the session in a using statement instead of calling `session.endSession`. If +your language has an idiomatic way of disposing resources you MAY choose to implement that in addition to or instead of +`endSession`, whichever is more idiomatic for your language. + +A driver MUST allow multiple calls to `endSession` (or `Dispose`). All calls after the first one are ignored. + +Conceptually, calling `endSession` implies ending the corresponding server session (by calling the `endSessions` +command). As an implementation detail drivers SHOULD cache server sessions for reuse (see Server Session Pool). + +Once a `ClientSession` has ended, drivers MUST report an error if any operations are attempted with that +`ClientSession`. + +## ServerSession + +A `ServerSession` is the driver object that tracks a server session. This object is an implementation detail and does +not need to be public. Drivers may store this information however they choose; this data structure is defined here +merely to describe the operation of the server session pool. + +ServerSession interface summary + +```java +interface ServerSession { + BsonDocument sessionId; + DateTime lastUse; +} +``` + +### sessionId + +This property returns the server session ID. + +### lastUse + +The driver MUST update the value of this property with the current DateTime every time the server session ID is sent to +the server. This allows the driver to track with reasonable accuracy the server's view of when a server session was last +used. + +### Creating a ServerSession + +When a driver needs to create a new `ServerSession` instance the only information it needs is the session ID to use for +the new session. It can either get the session ID from the server by running the `startSession` command, or it can +generate it locally. 
+ +In either case, the lastUse field of the `ServerSession` MUST be set to the current time when the `ServerSession` is +created. + +### Generating a session ID locally + +Running the `startSession` command to get a session ID for a new session requires a round trip to the server. As an +optimization the server allows drivers to generate new session IDs locally and to just start using them. When a server +sees a new session ID that it has never seen before it simply assumes that it is a new session. + +A session ID is a `BsonDocument` that has the following form: + +```typescript +interface SessionId { + id: UUID +} +``` + +Where the UUID is encoded as a BSON binary value of subtype 4. + +The id field of the session ID is a version 4 UUID that must comply with the format described in RFC 4122. Section 4.4 +describes an algorithm for generating correctly-versioned UUIDs from a pseudo-random number generator. + +If a driver is unable to generate a version 4 UUID it MAY instead run the `startSession` command and let the server +generate the session ID. + +## MongoDatabase changes + +All `MongoDatabase` methods that talk to the server MUST send a session ID with the command when connected to a +deployment that supports sessions so that the server can associate the operation with a session ID. + +### New database methods that take an explicit session + +All `MongoDatabase` methods that talk to the server SHOULD be overloaded to take an explicit session parameter. (See +[why is session an explicit parameter?](#why-is-session-an-explicit-parameter).) + +When overloading methods to take a session parameter, the session parameter SHOULD be the first parameter. If +overloading is not possible for your language, it MAY be in a different position or MAY be embedded in an options +structure. 
+ +Methods that have a session parameter MUST check that the session argument is not null and was created by the same +`MongoClient` that this `MongoDatabase` came from and report an error if they do not match. + +### Existing database methods that start an implicit session + +When an existing `MongoDatabase` method that does not take a session is called, the driver MUST behave as if a new +`ClientSession` was started just for this one operation and ended immediately after this operation completes. The actual +implementation will likely involve calling `client.startSession`, but that is not required by this spec. Regardless, +please consult the startSession section to replicate the required steps for creating a session. The driver MUST NOT use +the session if the checked out connection does not support sessions (see +[How to Tell Whether a Connection Supports Sessions](#how-to-tell-whether-a-connection-supports-sessions)) and, in all +cases, MUST NOT consume a server session id until after the connection is checked out and session support is confirmed. + +## MongoCollection changes + +All `MongoCollection` methods that talk to the server MUST send a session ID with the command when connected to a +deployment that supports sessions so that the server can associate the operation with a session ID. + +### New collection methods that take an explicit session + +All `MongoCollection` methods that talk to the server, with the exception of `estimatedDocumentCount`, SHOULD be +overloaded to take an explicit session parameter. (See +[why is session an explicit parameter?](#why-is-session-an-explicit-parameter).) + +When overloading methods to take a session parameter, the session parameter SHOULD be the first parameter. If +overloading is not possible for your language, it MAY be in a different position or MAY be embedded in an options +structure. 
+ +Methods that have a session parameter MUST check that the session argument is not null and was created by the same +`MongoClient` that this `MongoCollection` came from and report an error if they do not match. + +The `estimatedDocumentCount` helper does not support an explicit session parameter. The underlying command, `count`, is +not supported in a transaction, so supporting an explicit session would likely confuse application developers. The +helper returns an estimate of the documents in a collection and causal consistency is unlikely to improve the accuracy +of the estimate. + +### Existing collection methods that start an implicit session + +When an existing `MongoCollection` method that does not take a session is called, the driver MUST behave as if a new +`ClientSession` was started just for this one operation and ended immediately after this operation completes. The actual +implementation will likely involve calling `client.startSession`, but that is not required by this spec. Regardless, +please consult the startSession section to replicate the required steps for creating a session. The driver MUST NOT use +the session if the checked out connection does not support sessions (see +[How to Tell Whether a Connection Supports Sessions](#how-to-tell-whether-a-connection-supports-sessions)) and, in all +cases, MUST NOT consume a server session id until after the connection is checked out and session support is confirmed. + +## Sessions and Cursors + +When an operation using a session returns a cursor, all subsequent `GETMORE` commands for that cursor MUST be run using +the same session ID. + +If a driver decides to run a `KILLCURSORS` command on the cursor, it also MAY be run using the same session ID. See the +Exceptions below for when it is permissible to not include a session ID in a `KILLCURSORS` command. 
+ +## Sessions and Connections + +To reduce the number of `ServerSessions` created, the driver MUST only obtain an implicit session's `ServerSession` +after it successfully checks out a connection. A driver SHOULD NOT attempt to release the acquired session before +connection check in. + +Explicit sessions MAY be changed to allocate a server session similarly. + +## How to Tell Whether a Connection Supports Sessions + +A driver can determine whether a connection supports sessions by checking whether the `logicalSessionTimeoutMinutes` +property of the establishing handshake response has a value or not. If it has a value, sessions are supported. + +In the case of an explicit session, if sessions are not supported, the driver MUST raise an error. In the case of an +implicit session, if sessions are not supported, the driver MUST ignore the session. + +### Possible race condition when checking for session support + +There is a possible race condition that can happen between the time the driver checks whether sessions are supported and +subsequently sends a command to the server: + +- The server might have supported sessions at the time the connection was first opened (and reported a value for + logicalSessionTimeoutMinutes in the initial response to the [handshake](../mongodb-handshake/handshake.rst)), but have + subsequently been downgraded to not support sessions. The server does not close the socket in this scenario, so the + driver will conclude that the server at the other end of this connection supports sessions. + +There is nothing that the driver can do about this race condition, and the server will just return an error in this +scenario. + +## Sending the session ID to the server on all commands + +When connected to a server that supports sessions a driver MUST append the session ID to every command it sends to the +server (with the exceptions noted in the following section). It does this by adding a top level `lsid` field to the +command sent to the server. 
A driver MUST do this without modifying any data supplied by the application (e.g. the +command document passed to runCommand).: + +```typescript +interface ExampleCommandWithLSID { + foo: 1; + lsid: SessionId; +} +``` + +## Exceptions to sending the session ID to the server on all commands + +There are some exceptions to the rule that a driver MUST append the session ID to every command it sends to the server. + +### When opening and authenticating a connection + +A driver MUST NOT append a session ID to any command sent during the process of opening and authenticating a connection. + +### When monitoring the state of a deployment + +A driver MAY omit a session ID in hello and legacy hello commands sent solely for the purposes of monitoring the state +of a deployment. + +### When sending a parallelCollectionScan command + +Sessions are designed for sequential operations and `parallelCollectionScan` is designed for parallel operation. Because +these are fundamentally incompatible goals, drivers MUST NOT append session ID to the `parallelCollectionScan` command +so that the resulting cursors have no associated session ID and thus can be used in parallel. + +### When sending a killCursors command + +A driver MAY omit a session ID in `killCursors` commands for two reasons. First, `killCursors` is only ever sent to a +particular server, so operation teams wouldn't need the `lsid` for cluster-wide killOp. An admin can manually kill the +op with its operation id in the case that it is slow. Secondly, some drivers have a background cursor reaper to kill +cursors that aren't exhausted and closed. Due to GC semantics, it can't use the same `lsid` for `killCursors` as was +used for a cursor's `find` and `getMore`, so there's no point in using any `lsid` at all. + +### When multiple users are authenticated and the session is implicit + +The driver MUST NOT send a session ID from an implicit session when multiple users are authenticated. 
If possible the +driver MUST NOT start an implicit session when multiple users are authenticated. Alternatively, if the driver cannot +determine whether multiple users are authenticated at the point in time that an implicit session is started, then the +driver MUST ignore any implicit sessions that subsequently end up being used on a connection that has multiple users +authenticated. + +### When using unacknowledged writes + +A session ID MUST NOT be used simultaneously by more than one operation. Since drivers don't wait for a response for an +unacknowledged write a driver would not know when the session ID could be reused. In theory a driver could use a new +session ID for each unacknowledged write, but that would result in many orphaned sessions building up at the server. + +Therefore drivers MUST NOT send a session ID with unacknowledged writes under any circumstances: + +- For unacknowledged writes with an explicit session, drivers SHOULD raise an error. If a driver allows users to provide + an explicit session with an unacknowledged write (e.g. for backwards compatibility), the driver MUST NOT send the + session ID. +- For unacknowledged writes without an explicit session, drivers SHOULD NOT use an implicit session. If a driver creates + an implicit session for unacknowledged writes without an explicit session, the driver MUST NOT send the session ID. + +Drivers MUST document the behavior of unacknowledged writes for both explicit and implicit sessions. 
+ +### When wrapping commands in a `$query` field + +If the driver is wrapping the command in a `$query` field for non-OP_MSG messages in order to pass a readPreference to a +mongos (see [ReadPreference and Mongos](../find_getmore_killcursors_commands.rst#readpreference-and-mongos)), the driver +SHOULD NOT add the `lsid` as a top-level field, and MUST add the `lsid` as a field of the `$query` + +```typescript +// Wrapped command: +interface WrappedCommandExample { + $query: { + find: { foo: 1 } + }, + $readPreference: {} +} + +// Correct application of lsid +interface CorrectLSIDUsageExample { + $query: { + find: { foo: 1 }, + lsid: SessionId + }, + $readPreference: {} +} + +// Incorrect application of lsid +interface IncorrectLSIDUsageExample { + $query: { + find: { foo: 1 } + }, + $readPreference: {}, + lsid: SessionId +} +``` + +## Server Commands + +### startSession + +The `startSession` server command has the following format: + +```typescript +interface StartSessionCommand { + startSession: 1; + $clusterTime?: ClusterTime; +} +``` + +The `$clusterTime` field should only be sent when gossipping the cluster time. See the section "Gossipping the cluster +time" for information on `$clusterTime`. + +The `startSession` command MUST be sent to the `admin` database. + +The server response has the following format: + +```typescript +interface StartSessionResponse { + ok: 1; + id: BsonDocument; +} +``` + +In case of an error, the server response has the following format: + +```typescript +interface StartSessionError { + ok: 0; + errmsg: string; + code: number; +} +``` + +When connected to a replica set the `startSession` command MUST be sent to the primary if the primary is available. The +`startSession` command MAY be sent to a secondary if there is no primary available at the time the `startSession` +command needs to be run. 
+ +Drivers SHOULD generate session IDs locally if possible instead of running the `startSession` command, since running the +command requires a network round trip. + +### endSessions + +The `endSessions` server command has the following format: + +```typescript +interface EndSessionCommand { + endSessions: Array; + $clusterTime?: ClusterTime; +} +``` + +The `$clusterTime` field should only be sent when gossipping the cluster time. See the section of "Gossipping the +cluster time" for information on `$clusterTime`. + +The `endSessions` command MUST be sent to the `admin` database. + +The server response has the following format: + +```typescript +interface EndSessionResponse { + ok: 1; +} +``` + +In case of an error, the server response has the following format: + +```typescript +interface EndSessionError { + ok: 0; + errmsg: string; + code: number; +} +``` + +Drivers MUST ignore any errors returned by the `endSessions` command. + +The `endSessions` command MUST be run once when the `MongoClient` instance is shut down. If the number of sessions is +very large the `endSessions` command SHOULD be run multiple times to end 10,000 sessions at a time (in order to avoid +creating excessively large commands). + +When connected to a sharded cluster the `endSessions` command can be sent to any mongos. When connected to a replica set +the `endSessions` command MUST be sent to the primary if the primary is available, otherwise it MUST be sent to any +available secondary. + +## Server Session Pool + +Conceptually, each `ClientSession` can be thought of as having a new corresponding `ServerSession`. However, starting a +server session might require a round trip to the server (which can be avoided by generating the session ID locally) and +ending a session requires a separate round trip to the server. Drivers can operate more efficiently and put less load on +the server if they cache `ServerSession` instances for reuse. 
To this end drivers MUST implement a server session pool +containing `ServerSession` instances available for reuse. A `ServerSession` pool MUST belong to a `MongoClient` instance +and have the same lifetime as the `MongoClient` instance. + +When a new implicit `ClientSession` is started it MUST NOT attempt to acquire a server session from the server session +pool immediately. When a new explicit `ClientSession` is started it MAY attempt to acquire a server session from the +server session pool immediately. See the algorithm below for the steps to follow when attempting to acquire a +`ServerSession` from the server session pool. + +Note that `ServerSession` instances acquired from the server session pool might have as little as one minute left before +becoming stale and being discarded server side. Drivers MUST document that if an application waits more than one minute +after calling `startSession` to perform operations with that session it risks getting errors due to the server session +going stale before it was used. + +A server session is considered stale by the server when it has not been used for a certain amount of time. The default +amount of time is 30 minutes, but this value is configurable on the server. Servers that support sessions will report +this value in the `logicalSessionTimeoutMinutes` field of the reply to the hello and legacy hello commands. The smallest +reported timeout is recorded in the `logicalSessionTimeoutMinutes` property of the `TopologyDescription`. See the Server +Discovery And Monitoring specification for details. + +When a `ClientSession` is ended it MUST return the server session to the server session pool. See the algorithm below +for the steps to follow when returning a `ServerSession` instance to the server session pool. + +The server session pool has no maximum size. The pool only shrinks when a server session is acquired for use or +discarded. 
+
+When a `MongoClient` instance is closed the driver MUST proactively inform the server that the pooled server sessions
+will no longer be used by sending one or more `endSessions` commands to the server.
+
+The server session pool is modeled as a double ended queue. The algorithms below require the ability to add and remove
+`ServerSession` instances from the front of the queue and to inspect and possibly remove `ServerSession` instances from
+the back of the queue. The front of the queue holds `ServerSession` instances that have been released recently and
+should be the first to be reused. The back of the queue holds `ServerSession` instances that have not been used recently
+and that potentially will be discarded if they are not used again before they expire.
+
+An implicit session MUST be returned to the pool immediately following the completion of an operation. When an implicit
+session is associated with a cursor for use with `getMore` operations, the session MUST be returned to the pool
+immediately following a `getMore` operation that indicates that the cursor has been exhausted. In particular, it MUST
+NOT wait until all documents have been iterated by the application or until the application disposes of the cursor. For
+language runtimes that provide the ability to attach finalizers to objects that are run prior to garbage collection, the
+cursor class SHOULD return an implicit session to the pool in the finalizer if the cursor has not already been
+exhausted.
+
+If a driver supports process forking, the session pool needs to be cleared on one side of the forked processes (just
+like sockets need to reconnect). Drivers MUST provide a way to clear the session pool without sending `endSessions`.
+Drivers MAY make this automatic when the process ID changes. If they do not, they MUST document how to clear the session
+pool wherever they document fork support.
After clearing the session pool in this way, drivers MUST ensure that sessions +already checked out are not returned to the new pool. + +If a driver has a server session pool and a network error is encountered when executing any command with a +`ClientSession`, the driver MUST mark the associated `ServerSession` as dirty. Dirty server sessions are discarded when +returned to the server session pool. It is valid for a dirty session to be used for subsequent commands (e.g. an +implicit retry attempt, a later command in a bulk write, or a later operation on an explicit session), however, it MUST +remain dirty for the remainder of its lifetime regardless if later commands succeed. + +### Algorithm to acquire a ServerSession instance from the server session pool + +1. If the server session pool is empty create a new `ServerSession` and use it +2. Otherwise remove a `ServerSession` from the front of the queue and examine it: + - If the driver is in load balancer mode, use this `ServerSession`. + - If it has at least one minute left before becoming stale use this `ServerSession` + - If it has less than one minute left before becoming stale discard it (let it be garbage collected) and return to + step 1. + +See the [Load Balancer Specification](../load-balancers/load-balancers.md#session-expiration) for details on session +expiration. + +### Algorithm to return a ServerSession instance to the server session pool + +1. Before returning a server session to the pool a driver MUST first check the server session pool for server sessions + at the back of the queue that are about to expire (meaning they will expire in less than one minute). A driver MUST + stop checking server sessions once it encounters a server session that is not about to expire. Any server sessions + found that are about to expire are removed from the end of the queue and discarded (or allowed to be garbage + collected) +2. 
Then examine the server session that is being returned to the pool and: + - If this session is marked dirty (i.e. it was involved in a network error) discard it (let it be garbage collected) + - If it will expire in less than one minute discard it (let it be garbage collected) + - If it won't expire for at least one minute add it to the front of the queue + +## Gossipping the cluster time + +Drivers MUST gossip the cluster time when connected to a deployment that uses cluster times. + +Gossipping the cluster time is a process in which the driver participates in distributing the logical cluster time in a +deployment. Drivers learn the current cluster time (from a particular server's perspective) in responses they receive +from servers. Drivers in turn forward the highest cluster time they have seen so far to any server they subsequently +send commands to. + +A driver detects that it MUST participate in gossipping the cluster time when it sees a `$clusterTime` in a response +received from a server. + +### Receiving the current cluster time + +Drivers MUST examine all responses from the server commands to see if they contain a top level field named +`$clusterTime` formatted as follows: + +```typescript +interface ClusterTime { + clusterTime: Timestamp; + signature: { + hash: Binary; + keyId: Int64; + }; +} + +interface AnyServerResponse { + // ... other properties ... + $clusterTime: ClusterTime; +} +``` + +Whenever a driver receives a cluster time from a server it MUST compare it to the current highest seen cluster time for +the deployment. If the new cluster time is higher than the highest seen cluster time it MUST become the new highest seen +cluster time. Two cluster times are compared using only the BsonTimestamp value of the `clusterTime` embedded field (be +sure to include both the timestamp and the increment of the BsonTimestamp in the comparison). The signature field does +not participate in the comparison. 
+ +### Sending the highest seen cluster time + +Whenever a driver sends a command to a server it MUST include the highest seen cluster time in a top level field called +`$clusterTime`, in the same format as it was received in (but see Gossipping with mixed server versions below). + +### How to compute the `$clusterTime` to send to a server + +When sending `$clusterTime` to the server the driver MUST send the greater of the `clusterTime` values from +`MongoClient` and `ClientSession`. Normally a session's `clusterTime` will be less than or equal to the `clusterTime` in +`MongoClient`, but it could be greater than the `clusterTime` in `MongoClient` if `advanceClusterTime` was called with a +`clusterTime` that came from somewhere else. + +A driver MUST NOT use the `clusterTime` of a `ClientSession` anywhere else except when executing an operation with this +session. This rule protects the driver from the scenario where `advanceClusterTime` was called with an invalid +`clusterTime` by limiting the resulting server errors to the one session. The `clusterTime` of a `MongoClient` MUST NOT +be advanced by any `clusterTime` other than a `$clusterTime` received directly from a server. + +The safe way to compute the `$clusterTime` to send to a server is: + +1. When the `ClientSession` is first started its `clusterTime` is set to null. + +2. When the driver sends `$clusterTime` to the server it should send the greater of the `ClientSession` `clusterTime` + and the `MongoClient` `clusterTime` (either one could be null). + +3. When the driver receives a `$clusterTime` from the server it should advance both the `ClientSession` and the + `MongoClient` `clusterTime`. The `clusterTime` of a `ClientSession` can also be advanced by calling + `advanceClusterTime`. + +This sequence ensures that if the `clusterTime` of a `ClientSession` is invalid only that one session will be affected. 
+The `MongoClient` `clusterTime` is only updated with `$clusterTime` values known to be valid because they were received +directly from a server. + +### Tracking the highest seen cluster time does not require checking the deployment topology or the server version + +Drivers do not need to check the deployment topology or the server version they are connected to in order to track the +highest seen `$clusterTime`. They simply need to check for the presence of the `$clusterTime` field in responses +received from servers. + +### Gossipping with mixed server versions + +Drivers MUST check that the server they are sending a command to supports `$clusterTime` before adding `$clusterTime` to +the command. A server supports `$clusterTime` when the `maxWireVersion` >= 6. + +This supports the (presumably short lived) scenario where not all servers have been upgraded to 3.6. + +## Test Plan + +See the [README](tests/README.md) for tests. + +## Motivation + +Drivers currently have no concept of a session. The driver API needs to be extended to support sessions. + +## Design Rationale + +The goal is to modify the driver API in such a way that existing programs that don't use sessions continue to compile +and run correctly. This goal is met by defining new methods (or overloads) that take a session parameter. An application +does not need to be modified unless it wants to take advantage of the new features supported by sessions. + +## Backwards Compatibility + +The API changes to support sessions extend the existing API but do not introduce any backward breaking changes. Existing +programs that don't use sessions continue to compile and run correctly. + +## Reference Implementation (always required) + +A reference implementation must be completed before any spec is given status "Final", but it need not be completed +before the spec is "Accepted". 
While there is merit to the approach of reaching consensus on the specification and +rationale before writing code, the principle of "rough consensus and running code" is still useful when it comes to +resolving many discussions of spec details. A final reference implementation must include test code and documentation. + +The C and C# drivers will do initial POC implementations. + +## Future work (optional) + +Use this section to discuss any possible work for a future spec. This could cover issues where no consensus could be +reached but that don't block this spec, changes that were rejected due to unclear use cases, etc. + +## Open questions + +## Q&A + +### Why do we say drivers MUST NOT attempt to detect unsafe multi-threaded or multi-process use of `ClientSession`? + +Because doing so would provide an illusion of safety. It doesn't make these instances thread safe. And even if when +testing an application no such exceptions are encountered, that doesn't prove anything. The application might still be +using the instances in a thread-unsafe way and just didn't happen to do so during a test run. The final argument is that +checking this would require overhead that doesn't provide any clear benefit. + +### Why is session an explicit parameter? + +A previous draft proposed that ClientSession would be a MongoClient-like object added to the object hierarchy: + +```javascript +session = client.startSession(...) +database = session.getDatabase(...) // database is associated with session +collection = database.getCollection(...) // collection is associated with session +// operations on collection implicitly use session +collection.insertOne({}) +session.endSession() +``` + +The central feature of this design is that a MongoCollection (or database, or perhaps a GridFS object) is associated +with a session, which is then an implied parameter to any operations executed using that MongoCollection. 
+ +This API was rejected, with the justification that a ClientSession does not naturally belong to the state of a +MongoCollection. MongoCollection has up to now been a stable long-lived object that could be widely shared, and in most +drivers it is thread safe. Once we associate a ClientSession with it, the MongoCollection object becomes short-lived and +is no longer thread safe. It is a bad sign that MongoCollection's thread safety and lifetime vary depending on how its +parent MongoDatabase is created. + +Instead, we require users to pass session as a parameter to each function: + +```javascript +session = client.startSession(...) +database = client.getDatabase(...) +collection = database.getCollection(...) +// users must explicitly pass session to operations +collection.insertOne(session, {}) +session.endSession() +``` + +### Why does a network error cause the `ServerSession` to be discarded from the pool? + +When a network error is encountered when executing an operation with a `ClientSession`, the operation may be left +running on the server. Re-using this `ServerSession` can lead to parallel operations which violates the rule that a +session must be used sequentially. This results in multiple problems: + +1. killSessions to end an earlier operation would surprisingly also end a later operation. +2. An otherwise unrelated operation that just happens to use that same server session will potentially block waiting for + the previous operation to complete. For example, a transactional write will block a subsequent transactional write. + +### Why do automatic retry attempts re-use a dirty implicit session? + +The retryable writes spec requires that both the original and retry attempt use the same server session. The server will +block the retry attempt until the initial attempt completes at which point the retry attempt will continue executing. 
+ +For retryable reads that use an implicit session, drivers could choose to use a new server session for the retry attempt; +however, this would lose the information that these two reads are related. + +### Why don't drivers run the endSessions command to cleanup dirty server sessions? + +Drivers do not run the endSessions command when discarding a dirty server session because disconnects should be +relatively rare and the server won't normally accumulate a large number of abandoned dirty sessions. Any abandoned +sessions will be automatically cleaned up by the server after the configured `logicalSessionTimeoutMinutes`. + +### Why must drivers wait to consume a server session until after a connection is checked out? + +The problem that may occur is when the number of concurrent application requests is larger than the number of available +connections, the driver may generate many more implicit sessions than connections. For example with maxPoolSize=1 and +100 threads, 100 implicit sessions may be created. This increases the load on the server since session state is cached +in memory. In the worst case this kind of workload can hit the session limit and trigger TooManyLogicalSessions. + +In order to address this, drivers MUST NOT consume a server session id until after the connection is checked out. This +change will limit the number of "in use" server sessions to no greater than an application's maxPoolSize. + +The language here is specific about obtaining a server session as opposed to creating the implicit session to permit +drivers to take an implementation approach where the implicit session creation logic largely remains unchanged. Implicit +session creation can be left as is, as long as the underlying server resource isn't allocated until it is needed and, +once it is known that it will be used, after connection checkout succeeds. 
+ +It is still possible that via explicit sessions or cursors, which hold on to the session they started with, a driver +could over allocate sessions. But those scenarios are extenuating and outside the scope of solving in this spec. + +### Why should drivers NOT attempt to release a serverSession before checking back in the operation's connection? + +There are a variety of cases, such as retryable operations or cursor creating operations, where a `serverSession` must +remain acquired by the `ClientSession` after an operation is attempted. Attempting to account for all these scenarios +has risks that do not justify the potential guaranteed `ServerSession` allocation limiting. + +## Changelog + +- 2024-05-08: Migrated from reStructuredText to Markdown. +- 2017-09-13: If causalConsistency option is omitted assume true +- 2017-09-16: Omit session ID when opening and authenticating a connection +- 2017-09-18: Drivers MUST gossip the cluster time when they see a `$clusterTime`. +- 2017-09-19: How to safely use initialClusterTime +- 2017-09-29: Add an exception to the rule that `KILLCURSORS` commands always require a session id +- 2017-10-03: startSession and endSessions commands MUST be sent to the admin database +- 2017-10-03: Fix format of endSessions command +- 2017-10-04: Added advanceClusterTime +- 2017-10-06: Added descriptions of explicit and implicit sessions +- 2017-10-17: Implicit sessions MUST NOT be used when multiple users authenticated +- 2017-10-19: Possible race conditions when checking whether a deployment supports sessions +- 2017-11-21: Drivers MUST NOT send a session ID for unacknowledged writes +- 2018-01-10: Note that MongoClient must retain highest clusterTime +- 2018-01-10: Update test plan for drivers without APM +- 2018-01-11: Clarify that sessions require replica sets or sharded clusters +- 2018-02-20: Add implicit/explicit session tests +- 2018-02-20: Drivers SHOULD error if unacknowledged writes are used with sessions +- 2018-05-23: Drivers 
MUST not use session ID with parallelCollectionScan +- 2018-06-07: Document that estimatedDocumentCount does not support explicit sessions +- 2018-07-19: Justify why session must be an explicit parameter to each function +- 2018-10-11: Session pools must be cleared in child process after fork +- 2019-05-15: A ServerSession that is involved in a network error MUST be discarded +- 2019-10-22: Drivers may defer checking if a deployment supports sessions until the first +- 2021-04-08: Updated to use hello and legacy hello +- 2021-04-08: Adding in behaviour for load balancer mode. +- 2020-05-26: Simplify logic for determining sessions support +- 2022-01-28: Implicit sessions MUST obtain server session after connection checkout succeeds +- 2022-03-24: ServerSession Pooling is required and clarifies session acquisition bounding +- 2022-06-13: Move prose tests to test README and apply new ordering +- 2022-10-05: Remove spec front matter +- 2023-02-24: Defer checking for session support until after connection checkout diff --git a/source/sessions/driver-sessions.rst b/source/sessions/driver-sessions.rst index 493bcc8ff7..9cd2e55e32 100644 --- a/source/sessions/driver-sessions.rst +++ b/source/sessions/driver-sessions.rst @@ -1,1154 +1,4 @@ -============================= -Driver Sessions Specification -============================= -:Status: Accepted -:Minimum Server Version: 3.6 - -.. contents:: - --------- - -Abstract -======== - -Version 3.6 of the server introduces the concept of logical sessions for -clients. A session is an abstract concept that represents a set of sequential -operations executed by an application that are related in some way. This -specification is limited to how applications start and end sessions. Other -specifications define various ways in which sessions are used (e.g. causally -consistent reads, retryable writes, or transactions). 
- -This specification also discusses how drivers participate in distributing the -cluster time throughout a deployment, a process known as "gossipping the -cluster time". While gossipping the cluster time is somewhat orthogonal to -sessions, any driver that implements sessions MUST also implement gossipping -the cluster time, so it is included in this specification. - -Definitions -=========== - -META ----- - -The keywords “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”, “SHOULD”, -“SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document are to be -interpreted as described in `RFC 2119 `_. - -Terms ------ - -ClientSession - The driver object representing a client session and the operations that can - be performed on it. Depending on the language a driver is written in this - might be an interface or a class. See also ``ServerSession``. - -Deployment - A set of servers that are all part of a single MongoDB cluster. We avoid the - word "cluster" because some people interpret "cluster" to mean "sharded cluster". - -Explicit session - A session that was started explicitly by the application by calling ``startSession`` - and passed as an argument to an operation. - -MongoClient - The root object of a driver's API. MAY be named differently in some drivers. - -Implicit session - A session that was started implicitly by the driver because the application - called an operation without providing an explicit session. - -MongoCollection - The driver object representing a collection and the operations that can be - performed on it. MAY be named differently in some drivers. - -MongoDatabase - The driver object representing a database and the operations that can be - performed on it. MAY be named differently in some drivers. - -ServerSession - The driver object representing a server session. This type is an - implementation detail and does not need to be public. See also - ``ClientSession``. 
- -Server session ID - A server session ID is a token used to identify a particular server - session. A driver can ask the server for a session ID using the - ``startSession`` command or it can generate one locally (see Generating a - Session ID locally). - -Session - A session is an abstract concept that represents a set of sequential - operations executed by an application that are related in some way. Other - specifications define the various ways in which operations can be related, - but examples include causally consistent reads and retryable writes. - -Topology - The current configuration and state of a deployment. - -Unacknowledged writes - Unacknowledged writes are write operations that are sent to the server - without waiting for a reply acknowledging the write. See the "When using - unacknowledged writes" section below for information on how unacknowledged - writes interact with sessions. - -Network error - Any network exception writing to or reading from a socket (e.g. a socket - timeout or error). - -Specification -============= - -Drivers currently have no concept of a session. The driver API will be expanded -to provide a way for applications to start and end sessions and to execute -operations in the context of a session. The goal is to expand the API in a way -that introduces no backward breaking changes. Existing applications that don't -use sessions don't need to be changed, and new applications that don't need -sessions can continue to be written using the existing API. - -To use sessions an application will call new (or overloaded) methods that take -a session parameter. - -Naming variations -================= - -This specification defines names for new methods and types. To the extent -possible, these names SHOULD be used by drivers. However, where a driver and/or -language's naming conventions differ, those naming conventions SHOULD be used. 
-For example, a driver might name a method ``StartSession`` or ``start_session`` instead -of ``startSession``, or might name a type ``client_session`` instead of ``ClientSession``. - -High level summary of the API changes for sessions -================================================== - -This section is just a high level summary of the new API. Details are provided -further on. - -Applications start a new session like this: - -.. code:: typescript - - options = new SessionOptions(/* various settings */); - session = client.startSession(options); - -The ``SessionOptions`` will be individually defined in several other -specifications. It is expected that the set of ``SessionOptions`` will grow over -time as sessions are used for new purposes. - -Applications use a session by passing it as an argument to operation methods. -For example: - -.. code:: typescript - - collection.InsertOne(session /* etc. */) - collection.UpdateOne(session /* etc. */) - -Applications end a session like this: - -.. code:: typescript - - session.endSession() - -This specification does not deal with multi-document transactions, which -are covered in `their own specification <../transactions/transactions.md>`_. - -MongoClient changes -=================== - -``MongoClient`` interface summary - -.. code:: java - - class SessionOptions { - // various other options as defined in other specifications - } - - interface MongoClient { - ClientSession startSession(SessionOptions options); - // other existing members of MongoClient - } - -Each new member is documented below. - -While it is not part of the public API, ``MongoClient`` also needs a private -(or internal) ``clusterTime`` member (containing either a BSON document or -null) to record the highest ``clusterTime`` observed in a deployment (as -described below in `Gossipping the cluster time`_). - -startSession ------------- - -The ``startSession`` method starts a new ``ClientSession`` with the provided options. 
- -It MUST NOT be possible to change the options provided to ``startSession`` after -``startSession`` has been called. This can be accomplished by making the -``SessionOptions`` class immutable or using some equivalent mechanism that is -idiomatic for your language. - -It is valid to call ``startSession`` with no options set. This will result in a -``ClientSession`` that has no effect on the operations performed in the context of -that session, other than to include a session ID in commands sent to the -server. - -The ``SessionOptions`` MAY be a strongly typed class in some drivers, or MAY be a -loosely typed dictionary in other drivers. Drivers MUST define ``SessionOptions`` -in such a way that new options can be added in a backward compatible way (it is -acceptable for backward compatibility to be at the source level). - -A ``ClientSession`` MUST be associated with a ``ServerSession`` at the time -``startSession`` is called. As an implementation optimization drivers MUST reuse -``ServerSession`` instances across multiple ``ClientSession`` instances subject -to the rule that a server session MUST NOT be used by two ``ClientSession`` -instances at the same time (see the Server Session Pool section). Additionally, -a ``ClientSession`` may only ever be associated with one ``ServerSession`` for -its lifetime. - -Drivers MUST NOT check for session support in `startSession`. Instead, if sessions -are not supported, the error MUST be reported the first time the session is used -for an operation (See `How to Tell Whether a Connection Supports Sessions`_). - -Explicit vs implicit sessions ------------------------------ - -An explicit session is one started explicitly by the application by calling -``startSession``. An implicit session is one started implicitly by the driver -because the application called an operation without providing an explicit -session. 
Internally, a driver must be able to distinguish between explicit and -implicit sessions, but no public API for this is necessary because an -application will never see an implicit session. - -The motivation for starting an implicit session for all methods that don't -take an explicit session parameter is to make sure that all commands that are -sent to the server are tagged with a session ID. This improves the ability of -an operations team to monitor (and kill if necessary) long running operations. -Tagging an operation with a session ID is specially useful if a deployment wide -operation needs to be killed. - -Authentication --------------- - -When using authentication, using a session requires that only a single user be -authenticated. Drivers that still support authenticating multiple users at once -MAY continue to do so, but MUST NOT allow sessions to be used under such -circumstances. - -If ``startSession`` is called when multiple users are authenticated drivers MUST -raise an error with the error message "Cannot call startSession when multiple -users are authenticated." - -If a driver allows authentication to be changed on the fly (presumably few -still do) the driver MUST either prevent ``ClientSession`` instances from being used with a -connection that doesn't have matching authentication or MUST return an error if -such use is attempted. - -ClientSession -============= - -``ClientSession`` instances are not thread safe or fork safe. They can only be -used by one thread or process at a time. - -Drivers MUST document the thread-safety and fork-safety limitations of sessions. -Drivers MUST NOT attempt to detect simultaneous use by multiple threads or -processes (see Q&A for the rationale). - -ClientSession interface summary: - -.. 
code:: java - - interface ClientSession { - MongoClient client; - Optional clusterTime; - SessionOptions options; - BsonDocument sessionId; - - void advanceClusterTime(BsonDocument clusterTime); - void endSession(); - } - -While it is not part of the public API, a ``ClientSession`` also has a private -(or internal) reference to a ``ServerSession``. - -Each member is documented below. - -client ------- - -This property returns the ``MongoClient`` that was used to start this -``ClientSession``. - -clusterTime ------------ - -This property returns the most recent cluster time seen by this session. If no -operations have been executed using this session this value will be null unless -``advanceClusterTime`` has been called. This value will also be null when a -cluster does not report cluster times. - -When a driver is gossiping the cluster time it should send the more recent -``clusterTime`` of the ``ClientSession`` and the ``MongoClient``. - -options -------- - -This property returns the ``SessionOptions`` that were used to start this -``ClientSession``. - -sessionId ---------- - -This property returns the session ID of this session. Note that since ``ServerSessions`` -are pooled, different ``ClientSession`` instances can have the same session ID, -but never at the same time. - -advanceClusterTime ------------------- - -This method advances the ``clusterTime`` for a session. If the new -``clusterTime`` is greater than the session's current ``clusterTime`` then the -session's ``clusterTime`` MUST be advanced to the new ``clusterTime``. If the -new ``clusterTime`` is less than or equal to the session's current -``clusterTime`` then the session's ``clusterTime`` MUST NOT be changed. - -This method MUST NOT advance the ``clusterTime`` in ``MongoClient`` because we -have no way of verifying that the supplied ``clusterTime`` is valid. 
If the -``clusterTime`` in ``MongoClient`` were set to an invalid value all future -operations with this ``MongoClient`` would result in the server returning an -error. The ``clusterTime`` in ``MongoClient`` should only be advanced with a -``$clusterTime`` received directly from a server. - -endSession ----------- - -This method ends a ``ClientSession``. - -In languages that have idiomatic ways of disposing of resources, drivers SHOULD -support that in addition to or instead of ``endSession``. For example, in the .NET -driver ``ClientSession`` would implement ``IDisposable`` and the application could -choose to call ``session.Dispose`` or put the session in a using statement instead -of calling ``session.endSession``. If your language has an idiomatic way of -disposing resources you MAY choose to implement that in addition to or instead -of ``endSession``, whichever is more idiomatic for your language. - -A driver MUST allow multiple calls to ``endSession`` (or ``Dispose``). All calls after -the first one are ignored. - -Conceptually, calling ``endSession`` implies ending the corresponding server -session (by calling the ``endSessions`` command). As an implementation detail -drivers SHOULD cache server sessions for reuse (see Server Session Pool). - -Once a ``ClientSession`` has ended, drivers MUST report an error if any operations -are attempted with that ``ClientSession``. - -ServerSession -============= - -A ``ServerSession`` is the driver object that tracks a server session. This object -is an implementation detail and does not need to be public. Drivers may store -this information however they choose; this data structure is defined here -merely to describe the operation of the server session pool. - -ServerSession interface summary - -.. code:: java - - interface ServerSession { - BsonDocument sessionId; - DateTime lastUse; - } - -sessionId ---------- - -This property returns the server session ID. 
- -lastUse -------- - -The driver MUST update the value of this property with the current DateTime -every time the server session ID is sent to the server. This allows the driver -to track with reasonable accuracy the server's view of when a server session -was last used. - -Creating a ServerSession ------------------------- - -When a driver needs to create a new ``ServerSession`` instance the only information -it needs is the session ID to use for the new session. It can either get the -session ID from the server by running the ``startSession`` command, or it can -generate it locally. - -In either case, the lastUse field of the ``ServerSession`` MUST be set to the -current time when the ``ServerSession`` is created. - -Generating a session ID locally -------------------------------- - -Running the ``startSession`` command to get a session ID for a new session requires -a round trip to the server. As an optimization the server allows drivers to -generate new session IDs locally and to just start using them. When a server -sees a new session ID that it has never seen before it simply assumes that it -is a new session. - -A session ID is a ``BsonDocument`` that has the following form: - -.. code:: typescript - - interface SessionId { - id: UUID - } - -Where the UUID is encoded as a BSON binary value of subtype 4. - -The id field of the session ID is a version 4 UUID that must comply with the -format described in RFC 4122. Section 4.4 describes an algorithm for generating -correctly-versioned UUIDs from a pseudo-random number generator. - -If a driver is unable to generate a version 4 UUID it MAY instead run the -``startSession`` command and let the server generate the session ID. - -MongoDatabase changes -===================== - -All ``MongoDatabase`` methods that talk to the server MUST send a session ID -with the command when connected to a deployment that supports sessions so that -the server can associate the operation with a session ID. 
- -New database methods that take an explicit session --------------------------------------------------- - -All ``MongoDatabase`` methods that talk to the server SHOULD be overloaded to -take an explicit session parameter. (See `why is session an explicit parameter?`_.) - -When overloading methods to take a session parameter, the session parameter -SHOULD be the first parameter. If overloading is not possible for your -language, it MAY be in a different position or MAY be embedded in an options -structure. - -Methods that have a session parameter MUST check that the session argument is -not null and was created by the same ``MongoClient`` that this ``MongoDatabase`` came -from and report an error if they do not match. - -Existing database methods that start an implicit session --------------------------------------------------------- - -When an existing ``MongoDatabase`` method that does not take a session is called, -the driver MUST behave as if a new ``ClientSession`` was started just for this one -operation and ended immediately after this operation completes. The actual -implementation will likely involve calling ``client.startSession``, but that is not -required by this spec. Regardless, please consult the startSession section to -replicate the required steps for creating a session. -The driver MUST NOT use the session if the checked out connection does not support sessions -(see `How to Tell Whether a Connection Supports Sessions`_) and, in all cases, MUST NOT consume a server -session id until after the connection is checked out and session support is confirmed. - -MongoCollection changes -======================= - -All ``MongoCollection`` methods that talk to the server MUST send a session ID -with the command when connected to a deployment that supports sessions so that -the server can associate the operation with a session ID. 
- -New collection methods that take an explicit session ----------------------------------------------------- - -All ``MongoCollection`` methods that talk to the server, with the exception of -`estimatedDocumentCount`, SHOULD be overloaded to take an explicit session -parameter. (See `why is session an explicit parameter?`_.) - -When overloading methods to take a session parameter, the session parameter -SHOULD be the first parameter. If overloading is not possible for your -language, it MAY be in a different position or MAY be embedded in an options -structure. - -Methods that have a session parameter MUST check that the session argument is -not null and was created by the same ``MongoClient`` that this ``MongoCollection`` came -from and report an error if they do not match. - -The `estimatedDocumentCount` helper does not support an explicit session -parameter. The underlying command, `count`, is not supported in a transaction, -so supporting an explicit session would likely confuse application developers. -The helper returns an estimate of the documents in a collection and -causal consistency is unlikely to improve the accuracy of the estimate. - -Existing collection methods that start an implicit session ----------------------------------------------------------- - -When an existing ``MongoCollection`` method that does not take a session is called, -the driver MUST behave as if a new ``ClientSession`` was started just for this one -operation and ended immediately after this operation completes. The actual -implementation will likely involve calling ``client.startSession``, but that is not -required by this spec. Regardless, please consult the startSession section to -replicate the required steps for creating a session. 
-The driver MUST NOT use the session if the checked out connection does not support sessions -(see `How to Tell Whether a Connection Supports Sessions`_) and, in all cases, MUST NOT consume a server -session id until after the connection is checked out and session support is confirmed. - -Sessions and Cursors -==================== - -When an operation using a session returns a cursor, all subsequent ``GETMORE`` -commands for that cursor MUST be run using the same session ID. - -If a driver decides to run a ``KILLCURSORS`` command on the cursor, it also MAY be -run using the same session ID. See the Exceptions below for when it is permissible to not -include a session ID in a ``KILLCURSORS`` command. - -Sessions and Connections -======================== - -To reduce the number of ``ServerSessions`` created, the driver MUST only obtain an implicit session's -``ServerSession`` after it successfully checks out a connection. -A driver SHOULD NOT attempt to release the acquired session before connection check in. - -Explicit sessions MAY be changed to allocate a server session similarly. - -How to Tell Whether a Connection Supports Sessions -=================================================== - -A driver can determine whether a connection supports sessions by checking whether -the ``logicalSessionTimeoutMinutes`` property of the establishing handshake response has -a value or not. If it has a value, sessions are supported. - -In the case of an explicit session, if sessions are not supported, the driver MUST raise an error. -In the case of an implicit session, if sessions are not supported, the driver MUST ignore the session. 
- -Possible race condition when checking for session support ---------------------------------------------------------- - -There is a possible race condition that can happen between the time the -driver checks whether sessions are supported and subsequently sends a command -to the server: - -* The server might have supported sessions at the time the connection was first - opened (and reported a value for logicalSessionTimeoutMinutes in the initial - response to the `handshake `_), - but have subsequently been downgraded to not support sessions. The server does - not close the socket in this scenario, so the driver will conclude that - the server at the other end of this connection supports sessions. - -There is nothing that the driver can do about this race condition, and the server -will just return an error in this scenario. - -Sending the session ID to the server on all commands -==================================================== - -When connected to a server that supports sessions a driver MUST append the -session ID to every command it sends to the server (with the exceptions noted -in the following section). It does this by adding a -top level ``lsid`` field to the command sent to the server. A driver MUST do this -without modifying any data supplied by the application (e.g. the command -document passed to runCommand).: - -.. code:: typescript - - interface ExampleCommandWithLSID { - foo: 1; - lsid: SessionId; - } - -Exceptions to sending the session ID to the server on all commands -================================================================== - -There are some exceptions to the rule that a driver MUST append the session ID to -every command it sends to the server. - -When opening and authenticating a connection --------------------------------------------- - -A driver MUST NOT append a session ID to any command sent during the process of -opening and authenticating a connection. 
- -When monitoring the state of a deployment ------------------------------------------ - -A driver MAY omit a session ID in hello and legacy hello commands sent solely -for the purposes of monitoring the state of a deployment. - -When sending a parallelCollectionScan command ---------------------------------------------- - -Sessions are designed for sequential operations and ``parallelCollectionScan`` -is designed for parallel operation. Because these are fundamentally -incompatible goals, drivers MUST NOT append session ID to the -``parallelCollectionScan`` command so that the resulting cursors have -no associated session ID and thus can be used in parallel. - -When sending a killCursors command ----------------------------------- - -A driver MAY omit a session ID in ``killCursors`` commands for two reasons. -First, ``killCursors`` is only ever sent to a particular server, so operation teams -wouldn't need the ``lsid`` for cluster-wide killOp. An admin can manually kill the op with -its operation id in the case that it is slow. Secondly, some drivers have a background -cursor reaper to kill cursors that aren't exhausted and closed. Due to GC semantics, -it can't use the same ``lsid`` for ``killCursors`` as was used for a cursor's ``find`` and ``getMore``, -so there's no point in using any ``lsid`` at all. - -When multiple users are authenticated and the session is implicit ------------------------------------------------------------------ - -The driver MUST NOT send a session ID from an implicit session when multiple -users are authenticated. If possible the driver MUST NOT start an implicit -session when multiple users are authenticated. Alternatively, if the driver -cannot determine whether multiple users are authenticated at the point in time -that an implicit session is started, then the driver MUST ignore any implicit -sessions that subsequently end up being used on a connection that has multiple -users authenticated. 
- -When using unacknowledged writes --------------------------------- - -A session ID MUST NOT be used simultaneously by more than one operation. Since -drivers don't wait for a response for an unacknowledged write a driver would -not know when the session ID could be reused. In theory a driver could use a -new session ID for each unacknowledged write, but that would result in many -orphaned sessions building up at the server. - -Therefore drivers MUST NOT send a session ID with unacknowledged writes under -any circumstances: - -* For unacknowledged writes with an explicit session, drivers SHOULD raise an - error. If a driver allows users to provide an explicit session with an - unacknowledged write (e.g. for backwards compatibility), the driver MUST NOT - send the session ID. - -* For unacknowledged writes without an explicit session, drivers SHOULD NOT use - an implicit session. If a driver creates an implicit session for - unacknowledged writes without an explicit session, the driver MUST NOT send - the session ID. - -Drivers MUST document the behavior of unacknowledged writes for both explicit -and implicit sessions. - -When wrapping commands in a ``$query`` field --------------------------------------------- - -If the driver is wrapping the command in a ``$query`` field for non-OP_MSG messages in order to pass a readPreference to a -mongos (see `ReadPreference and Mongos <./find_getmore_killcursors_commands.rst#readpreference-and-mongos>`_), -the driver SHOULD NOT add the ``lsid`` as a top-level field, and MUST add the ``lsid`` as a field of the ``$query`` - -.. 
code:: typescript - - // Wrapped command: - interface WrappedCommandExample { - $query: { - find: { foo: 1 } - }, - $readPreference: {} - } - - // Correct application of lsid - interface CorrectLSIDUsageExample { - $query: { - find: { foo: 1 }, - lsid: SessionId - }, - $readPreference: {} - } - - // Incorrect application of lsid - interface IncorrectLSIDUsageExample { - $query: { - find: { foo: 1 } - }, - $readPreference: {}, - lsid: SessionId - } - - -Server Commands -=============== - -startSession ------------- - -The ``startSession`` server command has the following format: - -.. code:: typescript - - interface StartSessionCommand { - startSession: 1; - $clusterTime?: ClusterTime; - } - -The ``$clusterTime`` field should only be sent when gossipping the cluster time. See the -section "Gossipping the cluster time" for information on ``$clusterTime``. - -The ``startSession`` command MUST be sent to the ``admin`` database. - -The server response has the following format: - -.. code:: typescript - - interface StartSessionResponse { - ok: 1; - id: BsonDocument; - } - -In case of an error, the server response has the following format: - -.. code:: typescript - - interface StartSessionError { - ok: 0; - errmsg: string; - code: number; - } - -When connected to a replica set the ``startSession`` command MUST be sent to the -primary if the primary is available. The ``startSession`` command MAY be sent to a -secondary if there is no primary available at the time the ``startSession`` command -needs to be run. - -Drivers SHOULD generate session IDs locally if possible instead of running the -``startSession`` command, since running the command requires a network round trip. - -endSessions ------------ - -The ``endSessions`` server command has the following format: - -.. code:: typescript - - interface EndSessionCommand { - endSessions: Array; - $clusterTime?: ClusterTime; - } - -The ``$clusterTime`` field should only be sent when gossipping the cluster time. 
See the -section of "Gossipping the cluster time" for information on ``$clusterTime``. - -The ``endSessions`` command MUST be sent to the ``admin`` database. - -The server response has the following format: - -.. code:: typescript - - interface EndSessionResponse { - ok: 1; - } - -In case of an error, the server response has the following format: - -.. code:: typescript - - interface EndSessionError { - ok: 0; - errmsg: string; - code: number; - } - -Drivers MUST ignore any errors returned by the ``endSessions`` command. - -The ``endSessions`` command MUST be run once when the ``MongoClient`` instance is shut down. -If the number of sessions is very large the ``endSessions`` command SHOULD be run -multiple times to end 10,000 sessions at a time (in order to avoid creating excessively large commands). - -When connected to a sharded cluster the ``endSessions`` command can be sent to any -mongos. When connected to a replica set the ``endSessions`` command MUST be sent to -the primary if the primary is available, otherwise it MUST be sent to any -available secondary. - -Server Session Pool -=================== - -Conceptually, each ``ClientSession`` can be thought of as having a new -corresponding ``ServerSession``. However, starting a server session might require a -round trip to the server (which can be avoided by generating the session ID -locally) and ending a session requires a separate round trip to the server. -Drivers can operate more efficiently and put less load on the server if they -cache ``ServerSession`` instances for reuse. To this end drivers MUST -implement a server session pool containing ``ServerSession`` instances -available for reuse. A ``ServerSession`` pool MUST belong to a ``MongoClient`` -instance and have the same lifetime as the ``MongoClient`` instance. - -When a new implicit ``ClientSession`` is started it MUST NOT attempt to acquire a server -session from the server session pool immediately. 
When a new explicit ``ClientSession`` is started -it MAY attempt to acquire a server session from the server session pool immediately. -See the algorithm below for the steps to follow when attempting to acquire a ``ServerSession`` from the server session pool. - -Note that ``ServerSession`` instances acquired from the server session pool might have as -little as one minute left before becoming stale and being discarded server -side. Drivers MUST document that if an application waits more than one minute -after calling ``startSession`` to perform operations with that session it risks -getting errors due to the server session going stale before it was used. - -A server session is considered stale by the server when it has not been used -for a certain amount of time. The default amount of time is 30 minutes, but -this value is configurable on the server. Servers that support sessions will -report this value in the ``logicalSessionTimeoutMinutes`` field of the reply -to the hello and legacy hello commands. The smallest reported timeout is recorded in the -``logicalSessionTimeoutMinutes`` property of the ``TopologyDescription``. See the -Server Discovery And Monitoring specification for details. - -When a ``ClientSession`` is ended it MUST return the server session to the server session pool. -See the algorithm below for the steps to follow when returning a ``ServerSession`` instance to the server -session pool. - -The server session pool has no maximum size. The pool only shrinks when a -server session is acquired for use or discarded. - -When a ``MongoClient`` instance is closed the driver MUST proactively inform the -server that the pooled server sessions will no longer be used by sending one or -more ``endSessions`` commands to the server. - -The server session pool is modeled as a double ended queue. 
The algorithms -below require the ability to add and remove ``ServerSession`` instances from the front of -the queue and to inspect and possibly remove ``ServerSession`` instances from the back of -the queue. The front of the queue holds ``ServerSession`` instances that have been released -recently and should be the first to be reused. The back of the queue holds -``ServerSession`` instances that have not been used recently and that potentially will be -discarded if they are not used again before they expire. - -An implicit session MUST be returned to the pool immediately following the completion of -an operation. When an implicit session is associated with a cursor for use with ``getMore`` -operations, the session MUST be returned to the pool immediately following a ``getMore`` -operation that indicates that the cursor has been exhausted. In particular, it MUST not wait -until all documents have been iterated by the application or until the application disposes -of the cursor. For language runtimes that provide the ability to attach finalizers to objects -that are run prior to garbage collection, the cursor class SHOULD return an implicit session -to the pool in the finalizer if the cursor has not already been exhausted. - -If a driver supports process forking, the session pool needs to be cleared on -one side of the forked processes (just like sockets need to reconnect). -Drivers MUST provide a way to clear the session pool without sending -``endSessions``. Drivers MAY make this automatic when the process ID changes. -If they do not, they MUST document how to clear the session pool wherever they -document fork support. After clearing the session pool in this way, drivers -MUST ensure that sessions already checked out are not returned to the new pool. - -If a driver has a server session pool and a network error is encountered when -executing any command with a ``ClientSession``, the driver MUST mark the -associated ``ServerSession`` as dirty. 
Dirty server sessions are discarded -when returned to the server session pool. It is valid for a dirty session to be -used for subsequent commands (e.g. an implicit retry attempt, a later command -in a bulk write, or a later operation on an explicit session), however, it MUST -remain dirty for the remainder of its lifetime regardless if later commands -succeed. - -Algorithm to acquire a ServerSession instance from the server session pool --------------------------------------------------------------------------- - -1. If the server session pool is empty create a new ``ServerSession`` and use it - -2. Otherwise remove a ``ServerSession`` from the front of the queue and examine it: - - * If the driver is in load balancer mode, use this ``ServerSession``. - * If it has at least one minute left before becoming stale use this ``ServerSession`` - * If it has less than one minute left before becoming stale discard it (let it be garbage collected) and return to step 1. - -See the `Load Balancer Specification <../load-balancers/load-balancers.md#session-expiration>`__ -for details on session expiration. - - -Algorithm to return a ServerSession instance to the server session pool ------------------------------------------------------------------------ - -1. Before returning a server session to the pool a driver MUST first check the - server session pool for server sessions at the back of the queue that are about - to expire (meaning they will expire in less than one minute). A driver MUST - stop checking server sessions once it encounters a server session that is not - about to expire. Any server sessions found that are about to expire are removed - from the end of the queue and discarded (or allowed to be garbage collected) - -2. Then examine the server session that is being returned to the pool and: - - * If this session is marked dirty (i.e. 
it was involved in a network error) - discard it (let it be garbage collected) - * If it will expire in less than one minute discard it - (let it be garbage collected) - * If it won't expire for at least one minute add it to the front of the queue - -Gossipping the cluster time -=========================== - -Drivers MUST gossip the cluster time when connected to a deployment that uses -cluster times. - -Gossipping the cluster time is a process in which the driver participates in -distributing the logical cluster time in a deployment. Drivers learn the -current cluster time (from a particular server's perspective) in responses -they receive from servers. Drivers in turn forward the highest cluster -time they have seen so far to any server they subsequently send commands -to. - -A driver detects that it MUST participate in gossipping the cluster time when it sees -a ``$clusterTime`` in a response received from a server. - -Receiving the current cluster time ----------------------------------- - -Drivers MUST examine all responses from the server -commands to see if they contain a top level field named ``$clusterTime`` formatted -as follows: - -.. code:: typescript - - interface ClusterTime { - clusterTime: Timestamp; - signature: { - hash: Binary; - keyId: Int64; - }; - } - - interface AnyServerResponse { - // ... other properties ... - $clusterTime: ClusterTime; - } - -Whenever a driver receives a cluster time from a server it MUST compare it to -the current highest seen cluster time for the deployment. If the new cluster time -is higher than the highest seen cluster time it MUST become the new highest -seen cluster time. Two cluster times are compared using only the BsonTimestamp -value of the ``clusterTime`` embedded field (be sure to include both the timestamp -and the increment of the BsonTimestamp in the comparison). The signature field -does not participate in the comparison. 
- -Sending the highest seen cluster time -------------------------------------- - -Whenever a driver sends a command to a server it MUST include the highest -seen cluster time in a top level field called ``$clusterTime``, in the same format -as it was received in (but see Gossipping with mixed server versions below). - -How to compute the $clusterTime to send to a server ---------------------------------------------------- - -When sending ``$clusterTime`` to the server the driver MUST send the greater of -the ``clusterTime`` values from ``MongoClient`` and ``ClientSession``. Normally -a session's ``clusterTime`` will be less than or equal to the ``clusterTime`` -in ``MongoClient``, but it could be greater than the ``clusterTime`` in -``MongoClient`` if ``advanceClusterTime`` was called with a ``clusterTime`` -that came from somewhere else. - -A driver MUST NOT use the ``clusterTime`` of a ``ClientSession`` anywhere else -except when executing an operation with this session. This rule protects the -driver from the scenario where ``advanceClusterTime`` was called with an -invalid ``clusterTime`` by limiting the resulting server errors to the one -session. The ``clusterTime`` of a ``MongoClient`` MUST NOT be advanced by any -``clusterTime`` other than a ``$clusterTime`` received directly from a server. - -The safe way to compute the ``$clusterTime`` to send to a server is: - -1. When the ``ClientSession`` is first started its ``clusterTime`` is set to -null. - -2. When the driver sends ``$clusterTime`` to the server it should send the -greater of the ``ClientSession`` ``clusterTime`` and the ``MongoClient`` -``clusterTime`` (either one could be null). - -3. When the driver receives a ``$clusterTime`` from the server it should advance -both the ``ClientSession`` and the ``MongoClient`` ``clusterTime``. The ``clusterTime`` -of a ``ClientSession`` can also be advanced by calling ``advanceClusterTime``. 
- -This sequence ensures that if the ``clusterTime`` of a ``ClientSession`` is invalid only that -one session will be affected. The ``MongoClient`` ``clusterTime`` is only -updated with ``$clusterTime`` values known to be valid because they were -received directly from a server. - -Tracking the highest seen cluster time does not require checking the deployment topology or the server version --------------------------------------------------------------------------------------------------------------- - -Drivers do not need to check the deployment topology or the server version they -are connected to in order to track the highest seen ``$clusterTime``. They simply -need to check for the presence of the ``$clusterTime`` field in responses received -from servers. - -Gossipping with mixed server versions -------------------------------------- - -Drivers MUST check that the server they are sending a command to supports -``$clusterTime`` before adding ``$clusterTime`` to the command. A server supports -``$clusterTime`` when the ``maxWireVersion`` >= 6. - -This supports the (presumably short lived) scenario where not all servers have -been upgraded to 3.6. - -Test Plan -========= - -See the `README `_ for tests. - -Motivation -========== - -Drivers currently have no concept of a session. The driver API needs to be -extended to support sessions. - -Design Rationale -================ - -The goal is to modify the driver API in such a way that existing programs that -don't use sessions continue to compile and run correctly. This goal is met by -defining new methods (or overloads) that take a session parameter. An -application does not need to be modified unless it wants to take advantage of -the new features supported by sessions. - -Backwards Compatibility -======================= - -The API changes to support sessions extend the existing API but do not -introduce any backward breaking changes. Existing programs that don't use -sessions continue to compile and run correctly. 
- -Reference Implementation (always required) -========================================== - -A reference implementation must be completed before any spec is given status -"Final", but it need not be completed before the spec is “Accepted”. While -there is merit to the approach of reaching consensus on the specification and -rationale before writing code, the principle of "rough consensus and running -code" is still useful when it comes to resolving many discussions of spec -details. A final reference implementation must include test code and -documentation. - -The C and C# drivers will do initial POC implementations. - -Future work (optional) -====================== - -Use this section to discuss any possible work for a future spec. This could -cover issues where no consensus could be reached but that don’t block this -spec, changes that were rejected due to unclear use cases, etc. - -Open questions -============== - -Q&A -=== - -Why do we say drivers MUST NOT attempt to detect unsafe multi-threaded or multi-process use of ``ClientSession``? ------------------------------------------------------------------------------------------------------------------ - -Because doing so would provide an illusion of safety. It doesn't make these -instances thread safe. And even if when testing an application no such exceptions -are encountered, that doesn't prove anything. The application might still be -using the instances in a thread-unsafe way and just didn't happen to do so during -a test run. The final argument is that checking this would require overhead -that doesn't provide any clear benefit. - -Why is session an explicit parameter? -------------------------------------- - -A previous draft proposed that ClientSession would be a MongoClient-like object added to the object hierarchy:: - - session = client.startSession(...) - database = session.getDatabase(...) // database is associated with session - collection = database.getCollection(...) 
// collection is associated with session - // operations on collection implicitly use session - collection.insertOne({}) - session.endSession() - -The central feature of this design is that a MongoCollection (or database, or perhaps a GridFS object) is associated with a session, which is then an implied parameter to any operations executed using that MongoCollection. - -This API was rejected, with the justification that a ClientSession does not naturally belong to the state of a MongoCollection. MongoCollection has up to now been a stable long-lived object that could be widely shared, and in most drivers it is thread safe. Once we associate a ClientSession with it, the MongoCollection object becomes short-lived and is no longer thread safe. It is a bad sign that MongoCollection's thread safety and lifetime vary depending on how its parent MongoDatabase is created. - -Instead, we require users to pass session as a parameter to each function:: - - session = client.startSession(...) - database = client.getDatabase(...) - collection = database.getCollection(...) - // users must explicitly pass session to operations - collection.insertOne(session, {}) - session.endSession() - -Why does a network error cause the ``ServerSession`` to be discarded from the pool? ------------------------------------------------------------------------------------ - -When a network error is encountered when executing an operation with a -``ClientSession``, the operation may be left running on the server. Re-using -this ``ServerSession`` can lead to parallel operations which violates the -rule that a session must be used sequentially. This results in multiple -problems: - -#. killSessions to end an earlier operation would surprisingly also end a - later operation. -#. An otherwise unrelated operation that just happens to use that same server - session will potentially block waiting for the previous operation to - complete. 
For example, a transactional write will block a subsequent - transactional write. - -Why do automatic retry attempts re-use a dirty implicit session? ----------------------------------------------------------------- - -The retryable writes spec requires that both the original and retry attempt -use the same server session. The server will block the retry attempt until the -initial attempt completes at which point the retry attempt will continue -executing. - -For retryable reads that use an implicit session, drivers could choose to use a -new server session for the retry attempt however this would lose the -information that these two reads are related. - -Why don't drivers run the endSessions command to cleanup dirty server sessions? -------------------------------------------------------------------------------- - -Drivers do not run the endSessions command when discarding a dirty server -session because disconnects should be relatively rare and the server won't -normally accumulate a large number of abandoned dirty sessions. Any abandoned -sessions will be automatically cleaned up by the server after the -configured ``logicalSessionTimeoutMinutes``. - - -Why must drivers wait to consume a server session until after a connection is checked out? ------------------------------------------------------------------------------------------- - -The problem that may occur is when the number of concurrent application requests are larger than the number of available connections, -the driver may generate many more implicit sessions than connections. -For example with maxPoolSize=1 and 100 threads, 100 implicit sessions may be created. -This increases the load on the server since session state is cached in memory. -In the worst case this kind of workload can hit the session limit and trigger TooManyLogicalSessions. - -In order to address this, drivers MUST NOT consume a server session id until after the connection is checked out. 
-This change will limit the number of "in use" server sessions to no greater than an application's maxPoolSize. - -The language here is specific about obtaining a server session as opposed to creating the implicit session -to permit drivers to take an implementation approach where the implicit session creation logic largely remains unchanged. -Implicit session creation can be left as is, as long as the underlying server resource isn't allocated until it -is needed and, known it will be used, after connection checkout succeeds. - -It is still possible that via explicit sessions or cursors, which hold on to the session they started with, a driver could over allocate sessions. -But those scenarios are extenuating and outside the scope of solving in this spec. - -Why should drivers NOT attempt to release a serverSession before checking back in the operation's connection? -------------------------------------------------------------------------------------------------------------- - -There are a variety of cases, such as retryable operations or cursor creating operations, -where a ``serverSession`` must remain acquired by the ``ClientSession`` after an operation is attempted. -Attempting to account for all these scenarios has risks that do not justify the potential guaranteed ``ServerSession`` allocation limiting. 
- -Changelog -========= - -:2017-09-13: If causalConsistency option is omitted assume true -:2017-09-16: Omit session ID when opening and authenticating a connection -:2017-09-18: Drivers MUST gossip the cluster time when they see a $clusterTime -:2017-09-19: How to safely use initialClusterTime -:2017-09-29: Add an exception to the rule that ``KILLCURSORS`` commands always require a session id -:2017-10-03: startSession and endSessions commands MUST be sent to the admin database -:2017-10-03: Fix format of endSessions command -:2017-10-04: Added advanceClusterTime -:2017-10-06: Added descriptions of explicit and implicit sessions -:2017-10-17: Implicit sessions MUST NOT be used when multiple users authenticated -:2017-10-19: Possible race conditions when checking whether a deployment supports sessions -:2017-11-21: Drivers MUST NOT send a session ID for unacknowledged writes -:2018-01-10: Note that MongoClient must retain highest clusterTime -:2018-01-10: Update test plan for drivers without APM -:2018-01-11: Clarify that sessions require replica sets or sharded clusters -:2018-02-20: Add implicit/explicit session tests -:2018-02-20: Drivers SHOULD error if unacknowledged writes are used with sessions -:2018-05-23: Drivers MUST not use session ID with parallelCollectionScan -:2018-06-07: Document that estimatedDocumentCount does not support explicit sessions -:2018-07-19: Justify why session must be an explicit parameter to each function -:2018-10-11: Session pools must be cleared in child process after fork -:2019-05-15: A ServerSession that is involved in a network error MUST be discarded -:2019-10-22: Drivers may defer checking if a deployment supports sessions until the first -:2021-04-08: Updated to use hello and legacy hello -:2021-04-08: Adding in behaviour for load balancer mode. 
-:2020-05-26: Simplify logic for determining sessions support -:2022-01-28: Implicit sessions MUST obtain server session after connection checkout succeeds -:2022-03-24: ServerSession Pooling is required and clarifies session acquisition bounding -:2022-06-13: Move prose tests to test README and apply new ordering -:2022-10-05: Remove spec front matter -:2023-02-24: Defer checking for session support until after connection checkout +.. note:: + This specification has been converted to Markdown and renamed to + `driver-sessions.md `_. diff --git a/source/sessions/snapshot-sessions.md b/source/sessions/snapshot-sessions.md new file mode 100644 index 0000000000..c34aa7b89c --- /dev/null +++ b/source/sessions/snapshot-sessions.md @@ -0,0 +1,243 @@ +# Snapshot Reads Specification + +- Status: Accepted +- Minimum Server Version: 5.0 + +______________________________________________________________________ + +## Abstract + +Version 5.0 of the server introduces support for read concern level "snapshot" (non-speculative) for read commands +outside of transactions, including on secondaries. This spec builds upon the +[Sessions Specification](./driver-sessions.md) to define how an application requests "snapshot" level read concern and +how a driver interacts with the server to implement snapshot reads. + +## Definitions + +### META + +The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and +"OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt). + +### Terms + +**ClientSession**\ +The driver object representing a client session and the operations that can be performed on it. + +**MongoClient**\ +The root object of a driver's API. MAY be named differently in some drivers. + +**MongoCollection**\ +The driver object representing a collection and the operations that can be performed on it. MAY be +named differently in some drivers. 
+
+**MongoDatabase**\
+The driver object representing a database and the operations that can be performed on it. MAY be
+named differently in some drivers.
+
+**ServerSession**\
+The driver object representing a server session.
+
+**Session**\
+A session is an abstract concept that represents a set of sequential operations executed by an application
+that are related in some way. This specification defines how sessions are used to implement snapshot reads.
+
+**Snapshot reads**\
+Reads with read concern level `snapshot` that occur outside of transactions on both the primary and
+secondary nodes, including in sharded clusters. Snapshot reads are majority committed reads.
+
+**Snapshot timestamp**\
+Snapshot timestamp, representing timestamp of the first supported read operation (i.e.
+find/aggregate/distinct) in the session. The server creates a cursor in response to a snapshot find/aggregate command
+and reports `atClusterTime` within the `cursor` field in the response. For the distinct command the server adds a
+top-level `atClusterTime` field to the response. The `atClusterTime` field represents the timestamp of the read and is
+guaranteed to be majority committed.
+
+## Specification
+
+An application requests snapshot reads by creating a `ClientSession` with options that specify that snapshot reads are
+desired. An application then passes the session as an argument to methods in the `MongoDatabase` and `MongoCollection`
+classes. Read operations (find/aggregate/distinct) performed against that session will be read from the same snapshot.
+
+## High level summary of the API changes for snapshot reads
+
+Snapshot reads are built on top of client sessions.
+
+Applications will start a new client session for snapshot reads like this:
+
+```typescript
+options = new SessionOptions(snapshot = true);
+session = client.startSession(options);
+```
+
+All read operations performed using this session will be read from the same snapshot.
+ +If no value is provided for `snapshot` a value of false is implied. There are no MongoDatabase, MongoClient, or +MongoCollection API changes. + +## SessionOptions changes + +`SessionOptions` change summary + +```typescript +class SessionOptions { + Optional snapshot; + + // other options defined by other specs +} +``` + +In order to support snapshot reads a new property named `snapshot` is added to `SessionOptions`. Applications set +`snapshot` when starting a client session to indicate whether they want snapshot reads. All read operations performed +using that client session will share the same snapshot. + +Each new member is documented below. + +### snapshot + +Applications set `snapshot` when starting a session to indicate whether they want snapshot reads. + +Note that the `snapshot` property is optional. The default value of this property is false. + +Snapshot reads and causal consistency are mutually exclusive. Therefore if `snapshot` is set to true, +`causalConsistency` must be false. Client MUST throw an error if both `snapshot` and `causalConsistency` are set to +true. Snapshot reads are supported on both primaries and secondaries. + +## ClientSession changes + +Transactions are not allowed with snapshot sessions. Calling `session.startTransaction(options)` on a snapshot session +MUST raise an error. + +## ReadConcern changes + +`snapshot` added to [ReadConcernLevel enumeration](../read-write-concern/read-write-concern.rst#read-concern). + +## Server Commands + +There are no new server commands related to snapshot reads. Instead, snapshot reads are implemented by: + +1. Saving the `atClusterTime` returned by 5.0+ servers for the first find/aggregate/distinct operation in a private + `snapshotTime` property of the `ClientSession` object. Drivers MUST save `atClusterTime` in the `ClientSession` + object. +2. Passing that `snapshotTime` in the `atClusterTime` field of the `readConcern` field for subsequent snapshot read + operations (i.e. 
find/aggregate/distinct commands). + +## Server Command Responses + +For find/aggregate commands the server returns `atClusterTime` within the `cursor` field of the response. + +```typescript +{ + ok : 1 or 0, + ... // the rest of the command reply + cursor : { + ... // the rest of the cursor reply + atClusterTime : + } +} +``` + +For distinct commands the server returns `atClusterTime` as a top-level field in the response. + +```typescript +{ + ok : 1 or 0, + ... // the rest of the command reply + atClusterTime : +} +``` + +The `atClusterTime` timestamp MUST be stored in the `ClientSession` to later be passed as the `atClusterTime` field of +the `readConcern` with a `snapshot` level in subsequent read operations. + +## Server Errors + +1. The server may reply to read commands with a `SnapshotTooOld(239)` error if the client's `atClusterTime` value is not + available in the server's history. +2. The server will return `InvalidOptions(72)` error if both `atClusterTime` and `afterClusterTime` options are set to + true. +3. The server will return `InvalidOptions(72)` error if the command does not support readConcern.level "snapshot". + +## Snapshot Read Commands + +For snapshot reads the driver MUST first obtain `atClusterTime` from the server response of a find/aggregate/distinct +command, by specifying `readConcern` with `snapshot` level field, and store it as `snapshotTime` in the `ClientSession` +object. + +```typescript +{ + find : , // or other read command + ... // the rest of the command parameters + readConcern : + { + level : "snapshot" + } +} +``` + +For subsequent reads in the same session, the driver MUST send the `snapshotTime` saved in the `ClientSession` as the +value of the `atClusterTime` field of the `readConcern` with a `snapshot` level: + +```typescript +{ + find : , // or other read command + ... 
// the rest of the command parameters + readConcern : + { + level : "snapshot", + atClusterTime : + } +} +``` + +Lists of commands that support snapshot reads: + +1. find +2. aggregate +3. distinct + +## Sending readConcern to the server on all commands + +Drivers MUST set the readConcern `level` and `atClusterTime` fields (as outlined above) on all commands in a snapshot +session, including commands that do not accept a readConcern (e.g. insert, update). This ensures that the server will +return an error for invalid operations, such as writes, within a session configured for snapshot reads. + +## Requires MongoDB 5.0+ + +Snapshot reads require MongoDB 5.0+. When the connected server's maxWireVersion is less than 13, drivers MUST throw an +exception with the message "Snapshot reads require MongoDB 5.0 or later". + +## Motivation + +To support snapshot reads. Only supported with server version 5.0+ or newer. + +## Design Rationale + +The goal is to modify the driver API as little as possible so that existing programs that don't need snapshot reads +don't have to be changed. This goal is met by defining a `SessionOptions` field that applications use to start a +`ClientSession` that can be used for snapshot reads. Alternative explicit approach of obtaining `atClusterTime` from +`cursor` object and passing it to read concern object was considered initially. A session-based approach was chosen as +it aligns better with the existing API, and requires minimal API changes. Future extensibility for snapshot reads would +be best served by a session-based approach, as no API changes will be required. + +## Backwards Compatibility + +The API changes to support snapshot reads extend the existing API but do not introduce any backward breaking changes. +Existing programs that don't use snapshot reads continue to compile and run correctly. + +## Reference Implementation + +C# driver will provide the reference implementation. 
The corresponding ticket is +[CSHARP-3668](https://jira.mongodb.org/browse/CSHARP-3668). + +## Q&A + +## Changelog + +- 2024-05-08: Migrated from reStructuredText to Markdown. +- 2021-06-15: Initial version. +- 2021-06-28: Raise client side error on \< 5.0. +- 2021-06-29: Send readConcern with all snapshot session commands. +- 2021-07-16: Grammar revisions. Change SHOULD to MUST for startTransaction error to comply with existing tests. +- 2021-08-09: Updated client-side error spec tests to use correct syntax for `test.expectEvents` +- 2022-10-05: Remove spec front matter diff --git a/source/sessions/snapshot-sessions.rst b/source/sessions/snapshot-sessions.rst index ffa9ceeb94..244a49ce25 100644 --- a/source/sessions/snapshot-sessions.rst +++ b/source/sessions/snapshot-sessions.rst @@ -1,287 +1,4 @@ -============================ -Snapshot Reads Specification -============================ -:Status: Accepted -:Minimum Server Version: 5.0 - -.. contents:: - --------- - -Abstract -======== - -Version 5.0 of the server introduces support for read concern level "snapshot" (non-speculative) -for read commands outside of transactions, including on secondaries. -This spec builds upon the `Sessions Specification <../driver-sessions.rst>`_ to define how an application -requests "snapshot" level read concern and how a driver interacts with the server -to implement snapshot reads. - -Definitions -=========== - -META ----- - -The keywords “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”, “SHOULD”, -“SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document are to be -interpreted as described in `RFC 2119 `_. - -Terms ------ - -ClientSession - The driver object representing a client session and the operations that can be - performed on it. - -MongoClient - The root object of a driver's API. MAY be named differently in some drivers. - -MongoCollection - The driver object representing a collection and the operations that can be - performed on it. 
MAY be named differently in some drivers. - -MongoDatabase - The driver object representing a database and the operations that can be - performed on it. MAY be named differently in some drivers. - -ServerSession - The driver object representing a server session. - -Session - A session is an abstract concept that represents a set of sequential - operations executed by an application that are related in some way. This - specification defines how sessions are used to implement snapshot reads. - -Snapshot reads - Reads with read concern level ``snapshot`` that occur outside of transactions on - both the primary and secondary nodes, including in sharded clusters. - Snapshots reads are majority committed reads. - -Snapshot timestamp - Snapshot timestamp, representing timestamp of the first supported read operation (i.e. find/aggregate/distinct) in the session. - The server creates a cursor in response to a snapshot find/aggregate command and - reports ``atClusterTime`` within the ``cursor`` field in the response. For the distinct command the server adds a top-level ``atClusterTime`` field to the response. - The ``atClusterTime`` field represents the timestamp of the read and is guaranteed to be majority committed. - -Specification -============= - -An application requests snapshot reads by creating a ``ClientSession`` -with options that specify that snapshot reads are desired. An -application then passes the session as an argument to methods in the -``MongoDatabase`` and ``MongoCollection`` classes. Read operations (find/aggregate/distinct) performed against -that session will be read from the same snapshot. - -High level summary of the API changes for snapshot reads -======================================================== - -Snapshot reads are built on top of client sessions. - -Applications will start a new client session for snapshot reads like -this: - -.. 
code:: typescript - - options = new SessionOptions(snapshot = true); - session = client.startSession(options); - -All read operations performed using this session will be read from the same snapshot. - -If no value is provided for ``snapshot`` a value of false is -implied. -There are no MongoDatabase, MongoClient, or MongoCollection API changes. - -SessionOptions changes -====================== - -``SessionOptions`` change summary - -.. code:: typescript - - class SessionOptions { - Optional snapshot; - - // other options defined by other specs - } - -In order to support snapshot reads a new property named -``snapshot`` is added to ``SessionOptions``. Applications set -``snapshot`` when starting a client session to indicate -whether they want snapshot reads. All read operations performed -using that client session will share the same snapshot. - -Each new member is documented below. - -snapshot --------- - -Applications set ``snapshot`` when starting a session to -indicate whether they want snapshot reads. - -Note that the ``snapshot`` property is optional. The default value of -this property is false. - -Snapshot reads and causal consistency are mutually exclusive. Therefore if ``snapshot`` is set to true, -``causalConsistency`` must be false. Client MUST throw an error if both ``snapshot`` and ``causalConsistency`` are set to true. -Snapshot reads are supported on both primaries and secondaries. - -ClientSession changes -===================== - -Transactions are not allowed with snapshot sessions. -Calling ``session.startTransaction(options)`` on a snapshot session MUST raise an error. - -ReadConcern changes -=================== - -``snapshot`` added to `ReadConcernLevel enumeration <../read-write-concern/read-write-concern.rst#read-concern>`_. - -Server Commands -=============== - -There are no new server commands related to snapshot reads. Instead, -snapshot reads are implemented by: - -1. 
Saving the ``atClusterTime`` returned by 5.0+ servers for the first find/aggregate/distinct operation in a - private ``snapshotTime`` property of the ``ClientSession`` object. Drivers MUST save ``atClusterTime`` - in the ``ClientSession`` object. - -2. Passing that ``snapshotTime`` in the ``atClusterTime`` field of the ``readConcern`` field - for subsequent snapshot read operations (i.e. find/aggregate/distinct commands). - -Server Command Responses -======================== - -For find/aggregate commands the server returns ``atClusterTime`` within the ``cursor`` -field of the response. - -.. code:: typescript - - { - ok : 1 or 0, - ... // the rest of the command reply - cursor : { - ... // the rest of the cursor reply - atClusterTime : - } - } - -For distinct commands the server returns ``atClusterTime`` as a top-level field in the -response. - -.. code:: typescript - - { - ok : 1 or 0, - ... // the rest of the command reply - atClusterTime : - } - -The ``atClusterTime`` timestamp MUST be stored in the ``ClientSession`` to later be passed as the -``atClusterTime`` field of the ``readConcern`` with a ``snapshot`` level in subsequent read operations. - -Server Errors -============= -1. The server may reply to read commands with a ``SnapshotTooOld(239)`` error if the client's ``atClusterTime`` value is not available in the server's history. -2. The server will return ``InvalidOptions(72)`` error if both ``atClusterTime`` and ``afterClusterTime`` options are set to true. -3. The server will return ``InvalidOptions(72)`` error if the command does not support readConcern.level "snapshot". - -Snapshot Read Commands -====================== - -For snapshot reads the driver MUST first obtain ``atClusterTime`` from the server response of a find/aggregate/distinct command, -by specifying ``readConcern`` with ``snapshot`` level field, and store it as ``snapshotTime`` in the -``ClientSession`` object. - -.. code:: typescript - - { - find : , // or other read command - ... 
// the rest of the command parameters - readConcern : - { - level : "snapshot" - } - } - -For subsequent reads in the same session, the driver MUST send the ``snapshotTime`` saved in -the ``ClientSession`` as the value of the ``atClusterTime`` field of the -``readConcern`` with a ``snapshot`` level: - -.. code:: typescript - - { - find : , // or other read command - ... // the rest of the command parameters - readConcern : - { - level : "snapshot", - atClusterTime : - } - } - -Lists of commands that support snapshot reads: - -1. find -2. aggregate -3. distinct - -Sending readConcern to the server on all commands -================================================= - -Drivers MUST set the readConcern ``level`` and ``atClusterTime`` fields (as -outlined above) on all commands in a snapshot session, including commands that -do not accept a readConcern (e.g. insert, update). This ensures that the server -will return an error for invalid operations, such as writes, within a session -configured for snapshot reads. - -Requires MongoDB 5.0+ -===================== - -Snapshot reads require MongoDB 5.0+. When the connected server's -maxWireVersion is less than 13, drivers MUST throw an exception with the -message "Snapshot reads require MongoDB 5.0 or later". - -Motivation -========== - -To support snapshot reads. Only supported with server version 5.0+ or newer. - -Design Rationale -================ - -The goal is to modify the driver API as little as possible so that existing -programs that don't need snapshot reads don't have to be changed. -This goal is met by defining a ``SessionOptions`` field that applications use to -start a ``ClientSession`` that can be used for snapshot reads. Alternative explicit approach of -obtaining ``atClusterTime`` from ``cursor`` object and passing it to read concern object was considered initially. -A session-based approach was chosen as it aligns better with the existing API, and requires minimal API changes. 
-Future extensibility for snapshot reads would be best served by a session-based approach, as no API changes will be required. - -Backwards Compatibility -======================= - -The API changes to support snapshot reads extend the existing API but do not -introduce any backward breaking changes. Existing programs that don't use -snapshot reads continue to compile and run correctly. - -Reference Implementation -======================== - -C# driver will provide the reference implementation. -The corresponding ticket is `CSHARP-3668 `_. - -Q&A -=== - -Changelog -========= - -:2021-06-15: Initial version. -:2021-06-28: Raise client side error on < 5.0. -:2021-06-29: Send readConcern with all snapshot session commands. -:2021-07-16: Grammar revisions. Change SHOULD to MUST for startTransaction error to comply with existing tests. -:2021-08-09: Updated client-side error spec tests to use correct syntax for ``test.expectEvents`` -:2022-10-05: Remove spec front matter +.. note:: + This specification has been converted to Markdown and renamed to + `snapshot-sessions.md `_. diff --git a/source/sessions/tests/README.md b/source/sessions/tests/README.md new file mode 100644 index 0000000000..218e481a2f --- /dev/null +++ b/source/sessions/tests/README.md @@ -0,0 +1,249 @@ +# Driver Session Tests + +______________________________________________________________________ + +## Introduction + +The YAML and JSON files in this directory are platform-independent tests meant to exercise a driver's implementation of +sessions. These tests utilize the [Unified Test Format](../../unified-test-format/unified-test-format.md). + +### Snapshot session tests + +The default snapshot history window on the server is 5 minutes. Running the test in debug mode, or in any other slow +configuration may lead to `SnapshotTooOld` errors. 
Drivers can work around this issue by increasing the server's +`minSnapshotHistoryWindowInSeconds` parameter, for example: + +```python +client.admin.command('setParameter', 1, minSnapshotHistoryWindowInSeconds=600) +``` + +### Testing against servers that do not support sessions + +Since all regular 3.6+ servers support sessions, the prose tests which test for session non-support SHOULD use a +mongocryptd server as the test server (available with server versions 4.2+); however, if future versions of mongocryptd +support sessions or if mongocryptd is not a viable option for the driver implementing these tests, another server MAY be +substituted as long as it does not return a non-null value for `logicalSessionTimeoutMinutes`; in the event that no such +server is readily available, a mock server may be used as a last resort. + +As part of the test setup for these cases, create a `MongoClient` pointed at the test server with the options specified +in the test case and verify that the test server does NOT define a value for `logicalSessionTimeoutMinutes` by sending a +hello command and checking the response. + +## Prose tests + +### 1. Setting both `snapshot` and `causalConsistency` to true is not allowed + +Snapshot sessions tests require server of version 5.0 or higher and replica set or a sharded cluster deployment. + +- `client.startSession(snapshot = true, causalConsistency = true)` +- Assert that an error was raised by driver + +### 2. Pool is LIFO + +This test applies to drivers with session pools. + +- Call `MongoClient.startSession` twice to create two sessions, let us call them `A` and `B`. +- Call `A.endSession`, then `B.endSession`. +- Call `MongoClient.startSession`: the resulting session must have the same session ID as `B`. +- Call `MongoClient.startSession` again: the resulting session must have the same session ID as `A`. + +### 3. `$clusterTime` in commands + +- Turn `heartbeatFrequencyMS` up to a very large number. 
+- Register a command-started and a command-succeeded APM listener. If the driver has no APM support, inspect + commands/replies in another idiomatic way, such as monkey-patching or a mock server. +- Send a `ping` command to the server with the generic `runCommand` method. +- Assert that the command passed to the command-started listener includes `$clusterTime` if and only if `maxWireVersion` + \>= 6. +- Record the `$clusterTime`, if any, in the reply passed to the command-succeeded APM listener. +- Send another `ping` command. +- Assert that `$clusterTime` in the command passed to the command-started listener, if any, equals the `$clusterTime` in + the previous server reply. (Turning `heartbeatFrequencyMS` up prevents an intervening heartbeat from advancing the + `$clusterTime` between these final two steps.) + +Repeat the above for: + +- An aggregate command from the `aggregate` helper method +- A find command from the `find` helper method +- An insert command from the `insert_one` helper method + +### 4. Explicit and implicit session arguments + +- Register a command-started APM listener. If the driver has no APM support, inspect commands in another idiomatic way, + such as monkey-patching or a mock server. +- Create `client1` +- Get `database` from `client1` +- Get `collection` from `database` +- Start `session` from `client1` +- Call `collection.insertOne(session,...)` +- Assert that the command passed to the command-started listener contained the session `lsid` from `session`. +- Call `collection.insertOne(,...)` (*without* a session argument) +- Assert that the command passed to the command-started listener contained a session `lsid`. + +Repeat the above for all methods that take a session parameter. + +### 5. 
Session argument is for the right client + +- Create `client1` and `client2` +- Get `database` from `client1` +- Get `collection` from `database` +- Start `session` from `client2` +- Call `collection.insertOne(session,...)` +- Assert that an error was reported because `session` was not started from `client1` + +Repeat the above for all methods that take a session parameter. + +### 6. No further operations can be performed using a session after `endSession` has been called + +- Start a `session` +- End the `session` +- Call `collection.InsertOne(session, ...)` +- Assert that the proper error was reported + +Repeat the above for all methods that take a session parameter. + +If your driver implements a platform dependent idiomatic disposal pattern, test that also (if the idiomatic disposal +pattern calls `endSession` it would be sufficient to only test the disposal pattern since that ends up calling +`endSession`). + +### 7. Authenticating as multiple users suppresses implicit sessions + +Skip this test if your driver does not allow simultaneous authentication with multiple users. + +- Authenticate as two users +- Call `findOne` with no explicit session +- Capture the command sent to the server +- Assert that the command sent to the server does not have an `lsid` field + +### 8. Client-side cursor that exhausts the results on the initial query immediately returns the implicit session to the pool + +- Insert two documents into a collection +- Execute a find operation on the collection and iterate past the first document +- Assert that the implicit session is returned to the pool. This can be done in several ways: + - Track in-use count in the server session pool and assert that the count has dropped to zero + - Track the lsid used for the find operation (e.g. with APM) and then do another operation and assert that the same + lsid is used as for the find operation. + +### 9. 
Client-side cursor that exhausts the results after a `getMore` immediately returns the implicit session to the pool + +- Insert five documents into a collection +- Execute a find operation on the collection with batch size of 3 +- Iterate past the first four documents, forcing the final `getMore` operation +- Assert that the implicit session is returned to the pool prior to iterating past the last document + +### 10. No remaining sessions are checked out after each functional test + +At the end of every individual functional test of the driver, there SHOULD be an assertion that there are no remaining +sessions checked out from the pool. This may require changes to existing tests to ensure that they close any explicit +client sessions and any unexhausted cursors. + +### 11. For every combination of topology and readPreference, ensure that `find` and `getMore` both send the same session id + +- Insert three documents into a collection +- Execute a `find` operation on the collection with a batch size of 2 +- Assert that the server receives a non-zero lsid +- Iterate through enough documents (3) to force a `getMore` +- Assert that the server receives a non-zero lsid equal to the lsid that `find` sent. + +### 12. Session pool can be cleared after forking without calling `endSession` + +Skip this test if your driver does not allow forking. + +- Create ClientSession +- Record its lsid +- Delete it (so the lsid is pushed into the pool) +- Fork +- In the parent, create a ClientSession and assert its lsid is the same. +- In the child, create a ClientSession and assert its lsid is different. + +### 13. Existing sessions are not checked into a cleared pool after forking + +Skip this test if your driver does not allow forking. + +- Create ClientSession +- Record its lsid +- Fork +- In the parent, return the ClientSession to the pool, create a new ClientSession, and assert its lsid is the same. 
+- In the child, return the ClientSession to the pool, create a new ClientSession, and assert its lsid is different. + +### 14. Implicit sessions only allocate their server session after a successful connection checkout + +- Create a MongoClient with the following options: `maxPoolSize=1` and `retryWrites=true`. If testing against a sharded + deployment, the test runner MUST ensure that the MongoClient connects to only a single mongos host. +- Attach a command started listener that collects each command's lsid +- Initiate the following concurrent operations + - `insertOne({ }),` + - `deleteOne({ }),` + - `updateOne({ }, { $set: { a: 1 } }),` + - `bulkWrite([{ updateOne: { filter: { }, update: { $set: { a: 1 } } } }]),` + - `findOneAndDelete({ }),` + - `findOneAndUpdate({ }, { $set: { a: 1 } }),` + - `findOneAndReplace({ }, { a: 1 }),` + - `find().toArray()` +- Wait for all operations to complete successfully +- Assert the following across at least 5 retries of the above test: + - Drivers MUST assert that exactly one session is used for all operations at least once across the retries of this + test. + - Note that it's possible, although rare, for >1 server session to be used because the session is not released until + after the connection is checked in. + - Drivers MUST assert that the number of allocated sessions is strictly less than the number of concurrent operations + in every retry of this test. In this instance it would be less than (but NOT equal to) 8. + +### 15. `lsid` is added inside `$query` when using OP_QUERY + +This test only applies to drivers that have not implemented OP_MSG and still use OP_QUERY. + +- For a command to a mongos that includes a readPreference, verify that the `lsid` on query commands is added inside the + `$query` field, and NOT as a top-level field. + +### 16. Authenticating as a second user after starting a session results in a server error + +This test only applies to drivers that allow authentication to be changed on the fly. 
+ +- Authenticate as the first user +- Start a session by calling `startSession` +- Authenticate as a second user +- Call `findOne` using the session as an explicit session +- Assert that the driver returned an error because multiple users are authenticated + +### 17. Driver verifies that the session is owned by the current user + +This test only applies to drivers that allow authentication to be changed on the fly. + +- Authenticate as user A +- Start a session by calling `startSession` +- Logout user A +- Authenticate as user B +- Call `findOne` using the session as an explicit session +- Assert that the driver returned an error because the session is owned by a different user + +### 18. Implicit session is ignored if connection does not support sessions + +Refer to [Testing against servers that do not support sessions](#testing-against-servers-that-do-not-support-sessions) +and configure a `MongoClient` with command monitoring enabled. + +- Send a read command to the server (e.g., `findOne`), ignoring any errors from the server response +- Check the corresponding `commandStarted` event: verify that `lsid` is not set +- Send a write command to the server (e.g., `insertOne`), ignoring any errors from the server response +- Check the corresponding `commandStarted` event: verify that lsid is not set + +### 19. Explicit session raises an error if connection does not support sessions + +Refer to [Testing against servers that do not support sessions](#testing-against-servers-that-do-not-support-sessions) +and configure a `MongoClient` with default options. 
+ +- Create a new explicit session by calling `startSession` (this MUST NOT error) +- Attempt to send a read command to the server (e.g., `findOne`) with the explicit session passed in +- Assert that a client-side error is generated indicating that sessions are not supported +- Attempt to send a write command to the server (e.g., `insertOne`) with the explicit session passed in +- Assert that a client-side error is generated indicating that sessions are not supported + +## Changelog + +- 2024-05-08: Migrated from reStructuredText to Markdown. +- 2019-05-15: Initial version. +- 2021-06-15: Added snapshot-session tests. Introduced legacy and unified folders. +- 2021-07-30: Use numbering for prose test +- 2022-02-11: Convert legacy tests to unified format +- 2022-06-13: Relocate prose test from spec document and apply new ordering +- 2023-02-24: Fix formatting and add new prose tests 18 and 19 diff --git a/source/sessions/tests/README.rst b/source/sessions/tests/README.rst deleted file mode 100644 index 51efce8009..0000000000 --- a/source/sessions/tests/README.rst +++ /dev/null @@ -1,276 +0,0 @@ -==================== -Driver Session Tests -==================== - -.. contents:: - ----- - -Introduction -============ - -The YAML and JSON files in this directory are platform-independent tests -meant to exercise a driver's implementation of sessions. These tests utilize the -`Unified Test Format <../../unified-test-format/unified-test-format.md>`__. - -Snapshot session tests -~~~~~~~~~~~~~~~~~~~~~~ -The default snapshot history window on the server is 5 minutes. Running the test in debug mode, or in any other slow configuration -may lead to `SnapshotTooOld` errors. Drivers can work around this issue by increasing the server's `minSnapshotHistoryWindowInSeconds` parameter, for example: - -.. 
code:: python - - client.admin.command('setParameter', 1, minSnapshotHistoryWindowInSeconds=600) - -Testing against servers that do not support sessions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Since all regular 3.6+ servers support sessions, the prose tests which test for session non-support SHOULD -use a mongocryptd server as the test server (available with server versions 4.2+); however, if future versions of mongocryptd -support sessions or if mongocryptd is not a viable option for the driver implementing these tests, another server MAY be -substituted as long as it does not return a non-null value for ``logicalSessionTimeoutMinutes``; -in the event that no such server is readily available, a mock server may be used as a last resort. - -As part of the test setup for these cases, create a ``MongoClient`` pointed at the test server with the options -specified in the test case and verify that the test server does NOT define a value for ``logicalSessionTimeoutMinutes`` -by sending a hello command and checking the response. - -Prose tests -=========== - -1. Setting both ``snapshot`` and ``causalConsistency`` to true is not allowed -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Snapshot sessions tests require server of version 5.0 or higher and -replica set or a sharded cluster deployment. - -* ``client.startSession(snapshot = true, causalConsistency = true)`` -* Assert that an error was raised by driver - -2. Pool is LIFO -~~~~~~~~~~~~~~~ - -This test applies to drivers with session pools. - -* Call ``MongoClient.startSession`` twice to create two sessions, let us call them ``A`` and ``B``. -* Call ``A.endSession``, then ``B.endSession``. -* Call ``MongoClient.startSession``: the resulting session must have the same session ID as ``B``. -* Call ``MongoClient.startSession`` again: the resulting session must have the same session ID as ``A``. - -3. 
``$clusterTime`` in commands -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Turn ``heartbeatFrequencyMS`` up to a very large number. -* Register a command-started and a command-succeeded APM listener. If the driver has no APM support, inspect commands/replies in another idiomatic way, such as monkey-patching or a mock server. -* Send a ``ping`` command to the server with the generic ``runCommand`` method. -* Assert that the command passed to the command-started listener includes ``$clusterTime`` if and only if ``maxWireVersion`` >= 6. -* Record the ``$clusterTime``, if any, in the reply passed to the command-succeeded APM listener. -* Send another ``ping`` command. -* Assert that ``$clusterTime`` in the command passed to the command-started listener, if any, equals the ``$clusterTime`` in the previous server reply. (Turning ``heartbeatFrequencyMS`` up prevents an intervening heartbeat from advancing the ``$clusterTime`` between these final two steps.) - -Repeat the above for: - -* An aggregate command from the ``aggregate`` helper method -* A find command from the ``find`` helper method -* An insert command from the ``insert_one`` helper method - -4. Explicit and implicit session arguments -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Register a command-started APM listener. If the driver has no APM support, inspect commands in another idiomatic way, such as monkey-patching or a mock server. -* Create ``client1`` -* Get ``database`` from ``client1`` -* Get ``collection`` from ``database`` -* Start ``session`` from ``client1`` -* Call ``collection.insertOne(session,...)`` -* Assert that the command passed to the command-started listener contained the session ``lsid`` from ``session``. -* Call ``collection.insertOne(,...)`` (*without* a session argument) -* Assert that the command passed to the command-started listener contained a session ``lsid``. - -Repeat the above for all methods that take a session parameter. - -5. 
Session argument is for the right client -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Create ``client1`` and ``client2`` -* Get ``database`` from ``client1`` -* Get ``collection`` from ``database`` -* Start ``session`` from ``client2`` -* Call ``collection.insertOne(session,...)`` -* Assert that an error was reported because ``session`` was not started from ``client1`` - -Repeat the above for all methods that take a session parameter. - -6. No further operations can be performed using a session after ``endSession`` has been called -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Start a ``session`` -* End the ``session`` -* Call ``collection.InsertOne(session, ...)`` -* Assert that the proper error was reported - -Repeat the above for all methods that take a session parameter. - -If your driver implements a platform dependent idiomatic disposal pattern, test -that also (if the idiomatic disposal pattern calls ``endSession`` it would be -sufficient to only test the disposal pattern since that ends up calling -``endSession``). - -7. Authenticating as multiple users suppresses implicit sessions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Skip this test if your driver does not allow simultaneous authentication with multiple users. - -* Authenticate as two users -* Call ``findOne`` with no explicit session -* Capture the command sent to the server -* Assert that the command sent to the server does not have an ``lsid`` field - -8. Client-side cursor that exhausts the results on the initial query immediately returns the implicit session to the pool -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Insert two documents into a collection -* Execute a find operation on the collection and iterate past the first document -* Assert that the implicit session is returned to the pool. 
This can be done in several ways: - - * Track in-use count in the server session pool and assert that the count has dropped to zero - * Track the lsid used for the find operation (e.g. with APM) and then do another operation and - assert that the same lsid is used as for the find operation. - -9. Client-side cursor that exhausts the results after a ``getMore`` immediately returns the implicit session to the pool -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Insert five documents into a collection -* Execute a find operation on the collection with batch size of 3 -* Iterate past the first four documents, forcing the final ``getMore`` operation -* Assert that the implicit session is returned to the pool prior to iterating past the last document - -10. No remaining sessions are checked out after each functional test -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -At the end of every individual functional test of the driver, there SHOULD be an -assertion that there are no remaining sessions checked out from the pool. This -may require changes to existing tests to ensure that they close any explicit -client sessions and any unexhausted cursors. - -11. For every combination of topology and readPreference, ensure that ``find`` and ``getMore`` both send the same session id -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Insert three documents into a collection -* Execute a ``find`` operation on the collection with a batch size of 2 -* Assert that the server receives a non-zero lsid -* Iterate through enough documents (3) to force a ``getMore`` -* Assert that the server receives a non-zero lsid equal to the lsid that ``find`` sent. - -12. 
Session pool can be cleared after forking without calling ``endSession`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Skip this test if your driver does not allow forking. - -* Create ClientSession -* Record its lsid -* Delete it (so the lsid is pushed into the pool) -* Fork -* In the parent, create a ClientSession and assert its lsid is the same. -* In the child, create a ClientSession and assert its lsid is different. - -13. Existing sessions are not checked into a cleared pool after forking -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Skip this test if your driver does not allow forking. - -* Create ClientSession -* Record its lsid -* Fork -* In the parent, return the ClientSession to the pool, create a new ClientSession, and assert its lsid is the same. -* In the child, return the ClientSession to the pool, create a new ClientSession, and assert its lsid is different. - -14. Implicit sessions only allocate their server session after a successful connection checkout -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Create a MongoClient with the following options: ``maxPoolSize=1`` and ``retryWrites=true``. If testing against a sharded deployment, the test runner MUST ensure that the MongoClient connects to only a single mongos host. 
-* Attach a command started listener that collects each command's lsid -* Initiate the following concurrent operations - - * ``insertOne({ }),`` - * ``deleteOne({ }),`` - * ``updateOne({ }, { $set: { a: 1 } }),`` - * ``bulkWrite([{ updateOne: { filter: { }, update: { $set: { a: 1 } } } }]),`` - * ``findOneAndDelete({ }),`` - * ``findOneAndUpdate({ }, { $set: { a: 1 } }),`` - * ``findOneAndReplace({ }, { a: 1 }),`` - * ``find().toArray()`` - -* Wait for all operations to complete successfully -* Assert the following across at least 5 retries of the above test: - - * Drivers MUST assert that exactly one session is used for all operations at - least once across the retries of this test. - * Note that it's possible, although rare, for >1 server session to be used - because the session is not released until after the connection is checked in. - * Drivers MUST assert that the number of allocated sessions is strictly less - than the number of concurrent operations in every retry of this test. In - this instance it would be less than (but NOT equal to) 8. - -15. ``lsid`` is added inside ``$query`` when using OP_QUERY -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This test only applies to drivers that have not implemented OP_MSG and still use OP_QUERY. - -* For a command to a mongos that includes a readPreference, verify that the - ``lsid`` on query commands is added inside the ``$query`` field, and NOT as a - top-level field. - -16. Authenticating as a second user after starting a session results in a server error -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This test only applies to drivers that allow authentication to be changed on the fly. - -* Authenticate as the first user -* Start a session by calling ``startSession`` -* Authenticate as a second user -* Call ``findOne`` using the session as an explicit session -* Assert that the driver returned an error because multiple users are authenticated - -17. 
Driver verifies that the session is owned by the current user -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This test only applies to drivers that allow authentication to be changed on the fly. - -* Authenticate as user A -* Start a session by calling ``startSession`` -* Logout user A -* Authenticate as user B -* Call ``findOne`` using the session as an explicit session -* Assert that the driver returned an error because the session is owned by a different user - -18. Implicit session is ignored if connection does not support sessions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Refer to `Testing against servers that do not support sessions`_ and configure a ``MongoClient`` -with command monitoring enabled. - -* Send a read command to the server (e.g., ``findOne``), ignoring any errors from the server response -* Check the corresponding ``commandStarted`` event: verify that ``lsid`` is not set -* Send a write command to the server (e.g., ``insertOne``), ignoring any errors from the server response -* Check the corresponding ``commandStarted`` event: verify that lsid is not set - -19. Explicit session raises an error if connection does not support sessions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Refer to `Testing against servers that do not support sessions`_ and configure a ``MongoClient`` -with default options. - -* Create a new explicit session by calling ``startSession`` (this MUST NOT error) -* Attempt to send a read command to the server (e.g., ``findOne``) with the explicit session passed in -* Assert that a client-side error is generated indicating that sessions are not supported -* Attempt to send a write command to the server (e.g., ``insertOne``) with the explicit session passed in -* Assert that a client-side error is generated indicating that sessions are not supported - -Changelog -========= - -:2019-05-15: Initial version. 
-:2021-06-15: Added snapshot-session tests. Introduced legacy and unified folders. -:2021-07-30: Use numbering for prose test -:2022-02-11: Convert legacy tests to unified format -:2022-06-13: Relocate prose test from spec document and apply new ordering -:2023-02-24: Fix formatting and add new prose tests 18 and 19 diff --git a/source/transactions-convenient-api/transactions-convenient-api.rst b/source/transactions-convenient-api/transactions-convenient-api.rst index 82f1136193..668a165331 100644 --- a/source/transactions-convenient-api/transactions-convenient-api.rst +++ b/source/transactions-convenient-api/transactions-convenient-api.rst @@ -44,7 +44,7 @@ ClientSession `Driver Session`_ specification. The name of this object MAY vary across drivers. -.. _Driver Session: ../sessions/driver-sessions.rst +.. _Driver Session: ../sessions/driver-sessions.md MongoClient The root object of a driver's API. The name of this object MAY vary across diff --git a/source/transactions/tests/unified/client-bulkWrite.json b/source/transactions/tests/unified/client-bulkWrite.json new file mode 100644 index 0000000000..f8f1d97169 --- /dev/null +++ b/source/transactions/tests/unified/client-bulkWrite.json @@ -0,0 +1,592 @@ +{ + "description": "client bulkWrite transactions", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client_with_wmajority", + "uriOptions": { + "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + 
{ + "session": { + "id": "session_with_wmajority", + "client": "client_with_wmajority" + } + } + ], + "_yamlAnchors": { + "namespace": "transaction-tests.coll0" + }, + "initialData": [ + { + "databaseName": "transaction-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "tests": [ + { + "description": "client bulkWrite in a transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + 
"matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 12 + 
}, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client writeConcern ignored for client bulkWrite in transaction", + "operations": [ + { + "object": "session_with_wmajority", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 1 + } + } + }, + { + "object": "client_with_wmajority", + "name": "clientBulkWrite", + "arguments": { + "session": "session_with_wmajority", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + }, + { + "object": "session_with_wmajority", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client_with_wmajority", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + 
"databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with writeConcern in a transaction causes a transaction error", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "writeConcern": { + "w": 1 + }, + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot set write concern after starting a transaction" + } + } + ] + } + ] +} diff --git a/source/transactions/tests/unified/client-bulkWrite.yml b/source/transactions/tests/unified/client-bulkWrite.yml new file mode 100644 index 0000000000..eda2babbe7 --- /dev/null +++ b/source/transactions/tests/unified/client-bulkWrite.yml @@ -0,0 +1,262 @@ +description: "client bulkWrite transactions" +schemaVersion: "1.3" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name transaction-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + - session: + id: &session0 session0 + client: *client0 + - client: + id: &client_with_wmajority client_with_wmajority + uriOptions: + w: majority + observeEvents: + - commandStartedEvent + - session: + id: &session_with_wmajority session_with_wmajority + client: *client_with_wmajority + +_yamlAnchors: + namespace: &namespace "transaction-tests.coll0" + 
+initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 5, x: 55 } + - { _id: 6, x: 66 } + - { _id: 7, x: 77 } + +tests: + - description: "client bulkWrite in a transaction" + operations: + - object: *session0 + name: startTransaction + - object: *client0 + name: clientBulkWrite + arguments: + session: *session0 + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + 0: + insertedId: 8 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + 3: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 4 + deleteResults: + 4: + deletedCount: 1 + 5: + deletedCount: 2 + - object: *session0 + name: commitTransaction + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + lsid: { $$sessionLsid: *session0 } + txnNumber: 1 + startTransaction: true + autocommit: false + writeConcern: { $$exists: false } + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 
1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + lsid: { $$sessionLsid: *session0 } + txnNumber: 1 + startTransaction: { $$exists: false } + autocommit: false + writeConcern: { $$exists: false } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } + - description: 'client writeConcern ignored for client bulkWrite in transaction' + operations: + - object: *session_with_wmajority + name: startTransaction + arguments: + writeConcern: + w: 1 + - object: *client_with_wmajority + name: clientBulkWrite + arguments: + session: *session_with_wmajority + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + - object: *session_with_wmajority + name: commitTransaction + expectEvents: + - + client: *client_with_wmajority + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + lsid: { $$sessionLsid: *session_with_wmajority } + txnNumber: 1 + startTransaction: true + autocommit: false + writeConcern: { $$exists: false } + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + nsInfo: + - ns: *namespace + - + commandStartedEvent: + command: + commitTransaction: 1 + lsid: { $$sessionLsid: 
*session_with_wmajority } + txnNumber: { $numberLong: '1' } + startTransaction: { $$exists: false } + autocommit: false + writeConcern: + w: 1 + commandName: commitTransaction + databaseName: admin + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 5, x: 55 } + - { _id: 6, x: 66 } + - { _id: 7, x: 77 } + - { _id: 8, x: 88 } + - description: "client bulkWrite with writeConcern in a transaction causes a transaction error" + operations: + - object: *session0 + name: startTransaction + - object: *client0 + name: clientBulkWrite + arguments: + session: *session0 + writeConcern: + w: 1 + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + expectError: + isClientError: true + errorContains: "Cannot set write concern after starting a transaction" diff --git a/source/transactions/tests/unified/mongos-pin-auto-tests.py b/source/transactions/tests/unified/mongos-pin-auto-tests.py index 99a34b485d..ad2aeabd17 100644 --- a/source/transactions/tests/unified/mongos-pin-auto-tests.py +++ b/source/transactions/tests/unified/mongos-pin-auto-tests.py @@ -291,6 +291,11 @@ insert: *collection_name documents: - { _id : 1 }'''), + # clientBulkWrite: + 'clientBulkWrite': ('bulkWrite', '*client0', r'''models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 }'''), } # Maps from error_name to error_data. 
@@ -313,7 +318,11 @@ def create_pin_test(op_name, error_name): error_data = NON_TRANSIENT_ERRORS[error_name] if op_name.startswith('bulkWrite'): op_name = 'bulkWrite' - return TEMPLATE.format(**locals()) + test = TEMPLATE.format(**locals()) + if op_name == 'clientBulkWrite': + test += ' runOnRequirements:\n' + test += ' - minServerVersion: "8.0" # `bulkWrite` added to server 8.0"\n' + return test def create_unpin_test(op_name, error_name): @@ -324,7 +333,12 @@ def create_unpin_test(op_name, error_name): error_data = TRANSIENT_ERRORS[error_name] if op_name.startswith('bulkWrite'): op_name = 'bulkWrite' - return TEMPLATE.format(**locals()) + test = TEMPLATE.format(**locals()) + if op_name == 'clientBulkWrite': + test += ' runOnRequirements:\n' + test += ' - minServerVersion: "8.0" # `bulkWrite` added to server 8.0"\n' + return test + tests = [] diff --git a/source/transactions/tests/unified/mongos-pin-auto.json b/source/transactions/tests/unified/mongos-pin-auto.json index 93eac8bb77..27db520401 100644 --- a/source/transactions/tests/unified/mongos-pin-auto.json +++ b/source/transactions/tests/unified/mongos-pin-auto.json @@ -2004,6 +2004,104 @@ } ] }, + { + "description": "remain pinned after non-transient Interrupted error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { 
+ "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, { "description": "unpin after transient connection error on insertOne insert", "operations": [ @@ -5175,6 +5273,202 @@ ] } ] + }, + { + "description": "unpin after transient connection error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + 
"documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] } ] } diff --git a/source/transactions/tests/unified/mongos-pin-auto.yml b/source/transactions/tests/unified/mongos-pin-auto.yml index 7a76347555..a80dd62031 100644 --- a/source/transactions/tests/unified/mongos-pin-auto.yml +++ b/source/transactions/tests/unified/mongos-pin-auto.yml @@ -676,6 +676,36 @@ tests: - *abortTransaction outcome: *outcome + - description: remain pinned after non-transient Interrupted error on clientBulkWrite bulkWrite + 
operations: + - *startTransaction + - *initialCommand + - name: targetedFailPoint + object: testRunner + arguments: + session: *session0 + failPoint: + configureFailPoint: failCommand + mode: {times: 1} + data: + failCommands: ["bulkWrite"] + errorCode: 11601 + - name: clientBulkWrite + object: *client0 + arguments: + session: *session0 + models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 } + expectError: + errorLabelsOmit: ["TransientTransactionError"] + - *assertSessionPinned + - *abortTransaction + outcome: *outcome + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0" + - description: unpin after transient connection error on insertOne insert operations: - *startTransaction @@ -1614,3 +1644,63 @@ tests: - *abortTransaction outcome: *outcome + - description: unpin after transient connection error on clientBulkWrite bulkWrite + operations: + - *startTransaction + - *initialCommand + - name: targetedFailPoint + object: testRunner + arguments: + session: *session0 + failPoint: + configureFailPoint: failCommand + mode: {times: 1} + data: + failCommands: ["bulkWrite"] + closeConnection: true + - name: clientBulkWrite + object: *client0 + arguments: + session: *session0 + models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 } + expectError: + errorLabelsContain: ["TransientTransactionError"] + - *assertSessionUnpinned + - *abortTransaction + outcome: *outcome + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0" + + - description: unpin after transient ShutdownInProgress error on clientBulkWrite bulkWrite + operations: + - *startTransaction + - *initialCommand + - name: targetedFailPoint + object: testRunner + arguments: + session: *session0 + failPoint: + configureFailPoint: failCommand + mode: {times: 1} + data: + failCommands: ["bulkWrite"] + errorCode: 91 + - name: clientBulkWrite + object: *client0 + arguments: + session: *session0 + 
models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 } + expectError: + errorLabelsContain: ["TransientTransactionError"] + - *assertSessionUnpinned + - *abortTransaction + outcome: *outcome + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0" + diff --git a/source/transactions/transactions.md b/source/transactions/transactions.md index 76745b59bb..484ab33fb7 100644 --- a/source/transactions/transactions.md +++ b/source/transactions/transactions.md @@ -1,4 +1,4 @@ -# Driver Transactions Specification +# Transactions Specification - Status: Accepted - Minimum Server Version: 4.0 @@ -8,8 +8,8 @@ ______________________________________________________________________ ## **Abstract** Version 4.0 of the server introduces multi-statement transactions. This spec builds upon the -[Driver Sessions Specification](../sessions/driver-sessions.rst) to define how an application uses transactions and how -a driver interacts with the server to implement transactions. +[Driver Sessions Specification](../sessions/driver-sessions.md) to define how an application uses transactions and how a +driver interacts with the server to implement transactions. The API for transactions must be specified to ensure that all drivers and the mongo shell are consistent with each other, and to provide a natural interface for application developers and DBAs who use multi-statement transactions. @@ -23,7 +23,7 @@ The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SH ### **Terms** -This specification uses the terms defined in the [Driver Sessions Specification](../sessions/driver-sessions.rst) and +This specification uses the terms defined in the [Driver Sessions Specification](../sessions/driver-sessions.md) and [Retryable Writes Specification](../retryable-writes/retryable-writes.md). Additional terms are defined below. 
#### Resource Management Block @@ -289,7 +289,7 @@ containing the message "Transaction already in progress" without modifying any s startTransaction SHOULD report an error if the driver can detect that transactions are not supported by the deployment. A deployment does not support transactions when the deployment does not support sessions, or maxWireVersion \< 7, or the maxWireVersion \< 8 and the topology type is Sharded, see -[How to Check Whether a Deployment Supports Sessions](https://github.com/mongodb/specifications/blob/master/source/sessions/driver-sessions.rst#how-to-check-whether-a-deployment-supports-sessions). +[How to Check Whether a Deployment Supports Sessions](../sessions/driver-sessions.md#how-to-check-whether-a-deployment-supports-sessions). Note that checking the maxWireVersion does not guarantee that the deployment supports transactions, for example a MongoDB 4.0 replica set using MMAPv1 will report maxWireVersion 7 but does not support transactions. In this case, Drivers rely on the deployment to report an error when a transaction is started. @@ -636,7 +636,7 @@ Drivers MUST unpin a ClientSession in the following situations: 1. The transaction is aborted. The session MUST be unpinned regardless of whether or the `abortTransaction` command succeeds or fails, or was executed at all. If the operation fails with a retryable error, the session MUST be unpinned before performing server selection for the retry. -2. Any operation in the transcation, including `commitTransaction` fails with a TransientTransactionError. Transient +2. Any operation in the transaction, including `commitTransaction` fails with a TransientTransactionError. Transient errors indicate that the transaction in question has already been aborted or that the pinnned mongos is down/unavailable. Unpinning the session ensures that a subsequent `abortTransaction` (or `commitTransaction`) does not block waiting on a server that is unreachable. 
@@ -778,7 +778,7 @@ The Python driver serves as a reference implementation. ## **Design Rationale** -The design of this specification builds on the [Driver Sessions Specification](../sessions/driver-sessions.rst) and +The design of this specification builds on the [Driver Sessions Specification](../sessions/driver-sessions.md) and modifies the driver API as little as possible. Drivers will rely on the server to yield an error if an unsupported command is executed within a transaction. This will @@ -859,7 +859,7 @@ execute a command directly with minimum additional client-side logic. This specification depends on: -1. [Driver Sessions Specification](../sessions/driver-sessions.rst) +1. [Driver Sessions Specification](../sessions/driver-sessions.md) 2. [Retryable Writes Specification](../retryable-writes/retryable-writes.md) ## **Backwards Compatibility** @@ -1009,6 +1009,7 @@ The following commands are allowed inside transactions: 10. geoSearch 11. create 12. createIndexes on an empty collection created in the same transaction or on a non-existing collection +13. bulkWrite ### Why don’t drivers automatically retry commit after a write concern timeout error? @@ -1072,6 +1073,8 @@ objective of avoiding duplicate commits. ## **Changelog** +- 2024-05-08: Add bulkWrite to the list of commands allowed in transactions. + - 2024-02-15: Migrated from reStructuredText to Markdown. 
- 2023-11-22: Specify that non-transient transaction errors abort the transaction\ diff --git a/source/unified-test-format/schema-1.21.json b/source/unified-test-format/schema-1.21.json new file mode 100644 index 0000000000..9d22fe6209 --- /dev/null +++ b/source/unified-test-format/schema-1.21.json @@ -0,0 +1,1116 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Unified Test Format", + "type": "object", + "additionalProperties": false, + "required": [ + "description", + "schemaVersion", + "tests" + ], + "properties": { + "description": { + "type": "string" + }, + "schemaVersion": { + "$ref": "#/definitions/version" + }, + "runOnRequirements": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/runOnRequirement" + } + }, + "createEntities": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/entity" + } + }, + "initialData": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/collectionData" + } + }, + "tests": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/test" + } + }, + "_yamlAnchors": { + "type": "object", + "additionalProperties": true + } + }, + "definitions": { + "version": { + "type": "string", + "pattern": "^[0-9]+(\\.[0-9]+){1,2}$" + }, + "runOnRequirement": { + "type": "object", + "additionalProperties": false, + "minProperties": 1, + "properties": { + "maxServerVersion": { + "$ref": "#/definitions/version" + }, + "minServerVersion": { + "$ref": "#/definitions/version" + }, + "topologies": { + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "enum": [ + "single", + "replicaset", + "sharded", + "sharded-replicaset", + "load-balanced" + ] + } + }, + "serverless": { + "type": "string", + "enum": [ + "require", + "forbid", + "allow" + ] + }, + "serverParameters": { + "type": "object", + "minProperties": 1 + }, + "auth": { + "type": "boolean" + }, + "authMechanism": { + "type": "string" + }, + "csfle": { + "type": 
"boolean" + } + } + }, + "entity": { + "type": "object", + "additionalProperties": false, + "maxProperties": 1, + "minProperties": 1, + "properties": { + "client": { + "type": "object", + "additionalProperties": false, + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "uriOptions": { + "type": "object" + }, + "useMultipleMongoses": { + "type": "boolean" + }, + "observeEvents": { + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "enum": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent", + "poolCreatedEvent", + "poolReadyEvent", + "poolClearedEvent", + "poolClosedEvent", + "connectionCreatedEvent", + "connectionReadyEvent", + "connectionClosedEvent", + "connectionCheckOutStartedEvent", + "connectionCheckOutFailedEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent", + "serverDescriptionChangedEvent", + "topologyDescriptionChangedEvent" + ] + } + }, + "ignoreCommandMonitoringEvents": { + "type": "array", + "minItems": 1, + "items": { + "type": "string" + } + }, + "storeEventsAsEntities": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/storeEventsAsEntity" + } + }, + "observeLogMessages": { + "type": "object", + "minProperties": 1, + "additionalProperties": false, + "properties": { + "command": { + "$ref": "#/definitions/logSeverityLevel" + }, + "topology": { + "$ref": "#/definitions/logSeverityLevel" + }, + "serverSelection": { + "$ref": "#/definitions/logSeverityLevel" + }, + "connection": { + "$ref": "#/definitions/logSeverityLevel" + } + } + }, + "serverApi": { + "$ref": "#/definitions/serverApi" + }, + "observeSensitiveCommands": { + "type": "boolean" + } + } + }, + "clientEncryption": { + "type": "object", + "additionalProperties": false, + "required": [ + "id", + "clientEncryptionOpts" + ], + "properties": { + "id": { + "type": "string" + }, + "clientEncryptionOpts": { + "$ref": "#/definitions/clientEncryptionOpts" + } + } + }, + "database": 
{ + "type": "object", + "additionalProperties": false, + "required": [ + "id", + "client", + "databaseName" + ], + "properties": { + "id": { + "type": "string" + }, + "client": { + "type": "string" + }, + "databaseName": { + "type": "string" + }, + "databaseOptions": { + "$ref": "#/definitions/collectionOrDatabaseOptions" + } + } + }, + "collection": { + "type": "object", + "additionalProperties": false, + "required": [ + "id", + "database", + "collectionName" + ], + "properties": { + "id": { + "type": "string" + }, + "database": { + "type": "string" + }, + "collectionName": { + "type": "string" + }, + "collectionOptions": { + "$ref": "#/definitions/collectionOrDatabaseOptions" + } + } + }, + "session": { + "type": "object", + "additionalProperties": false, + "required": [ + "id", + "client" + ], + "properties": { + "id": { + "type": "string" + }, + "client": { + "type": "string" + }, + "sessionOptions": { + "type": "object" + } + } + }, + "bucket": { + "type": "object", + "additionalProperties": false, + "required": [ + "id", + "database" + ], + "properties": { + "id": { + "type": "string" + }, + "database": { + "type": "string" + }, + "bucketOptions": { + "type": "object" + } + } + }, + "thread": { + "type": "object", + "additionalProperties": false, + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string" + } + } + } + } + }, + "logComponent": { + "type": "string", + "enum": [ + "command", + "topology", + "serverSelection", + "connection" + ] + }, + "logSeverityLevel": { + "type": "string", + "enum": [ + "emergency", + "alert", + "critical", + "error", + "warning", + "notice", + "info", + "debug", + "trace" + ] + }, + "clientEncryptionOpts": { + "type": "object", + "additionalProperties": false, + "required": [ + "keyVaultClient", + "keyVaultNamespace", + "kmsProviders" + ], + "properties": { + "keyVaultClient": { + "type": "string" + }, + "keyVaultNamespace": { + "type": "string" + }, + "kmsProviders": { + "$ref": "#/definitions/kmsProviders" 
+ } + } + }, + "kmsProviders": { + "$defs": { + "stringOrPlaceholder": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "object", + "additionalProperties": false, + "required": [ + "$$placeholder" + ], + "properties": { + "$$placeholder": {} + } + } + ] + } + }, + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^aws(:[a-zA-Z0-9_]+)?$": { + "type": "object", + "additionalProperties": false, + "properties": { + "accessKeyId": { + "$ref": "#/definitions/kmsProviders/$defs/stringOrPlaceholder" + }, + "secretAccessKey": { + "$ref": "#/definitions/kmsProviders/$defs/stringOrPlaceholder" + }, + "sessionToken": { + "$ref": "#/definitions/kmsProviders/$defs/stringOrPlaceholder" + } + } + }, + "^azure(:[a-zA-Z0-9_]+)?$": { + "type": "object", + "additionalProperties": false, + "properties": { + "tenantId": { + "$ref": "#/definitions/kmsProviders/$defs/stringOrPlaceholder" + }, + "clientId": { + "$ref": "#/definitions/kmsProviders/$defs/stringOrPlaceholder" + }, + "clientSecret": { + "$ref": "#/definitions/kmsProviders/$defs/stringOrPlaceholder" + }, + "identityPlatformEndpoint": { + "$ref": "#/definitions/kmsProviders/$defs/stringOrPlaceholder" + } + } + }, + "^gcp(:[a-zA-Z0-9_]+)?$": { + "type": "object", + "additionalProperties": false, + "properties": { + "email": { + "$ref": "#/definitions/kmsProviders/$defs/stringOrPlaceholder" + }, + "privateKey": { + "$ref": "#/definitions/kmsProviders/$defs/stringOrPlaceholder" + }, + "endpoint": { + "$ref": "#/definitions/kmsProviders/$defs/stringOrPlaceholder" + } + } + }, + "^kmip(:[a-zA-Z0-9_]+)?$": { + "type": "object", + "additionalProperties": false, + "properties": { + "endpoint": { + "$ref": "#/definitions/kmsProviders/$defs/stringOrPlaceholder" + } + } + }, + "^local(:[a-zA-Z0-9_]+)?$": { + "type": "object", + "additionalProperties": false, + "properties": { + "key": { + "$ref": "#/definitions/kmsProviders/$defs/stringOrPlaceholder" + } + } + } + } + }, + "storeEventsAsEntity": { + 
"type": "object", + "additionalProperties": false, + "required": [ + "id", + "events" + ], + "properties": { + "id": { + "type": "string" + }, + "events": { + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "enum": [ + "PoolCreatedEvent", + "PoolReadyEvent", + "PoolClearedEvent", + "PoolClosedEvent", + "ConnectionCreatedEvent", + "ConnectionReadyEvent", + "ConnectionClosedEvent", + "ConnectionCheckOutStartedEvent", + "ConnectionCheckOutFailedEvent", + "ConnectionCheckedOutEvent", + "ConnectionCheckedInEvent", + "CommandStartedEvent", + "CommandSucceededEvent", + "CommandFailedEvent", + "ServerDescriptionChangedEvent", + "TopologyDescriptionChangedEvent" + ] + } + } + } + }, + "collectionData": { + "type": "object", + "additionalProperties": false, + "required": [ + "collectionName", + "databaseName", + "documents" + ], + "properties": { + "collectionName": { + "type": "string" + }, + "databaseName": { + "type": "string" + }, + "createOptions": { + "type": "object", + "properties": { + "writeConcern": false + } + }, + "documents": { + "type": "array", + "items": { + "type": "object" + } + } + } + }, + "expectedEventsForClient": { + "type": "object", + "additionalProperties": false, + "required": [ + "client", + "events" + ], + "properties": { + "client": { + "type": "string" + }, + "eventType": { + "type": "string", + "enum": [ + "command", + "cmap", + "sdam" + ] + }, + "events": { + "type": "array" + }, + "ignoreExtraEvents": { + "type": "boolean" + } + }, + "oneOf": [ + { + "required": [ + "eventType" + ], + "properties": { + "eventType": { + "const": "command" + }, + "events": { + "type": "array", + "items": { + "$ref": "#/definitions/expectedCommandEvent" + } + } + } + }, + { + "required": [ + "eventType" + ], + "properties": { + "eventType": { + "const": "cmap" + }, + "events": { + "type": "array", + "items": { + "$ref": "#/definitions/expectedCmapEvent" + } + } + } + }, + { + "required": [ + "eventType" + ], + "properties": { + 
"eventType": { + "const": "sdam" + }, + "events": { + "type": "array", + "items": { + "$ref": "#/definitions/expectedSdamEvent" + } + } + } + }, + { + "additionalProperties": false, + "properties": { + "client": { + "type": "string" + }, + "events": { + "type": "array", + "items": { + "$ref": "#/definitions/expectedCommandEvent" + } + }, + "ignoreExtraEvents": { + "type": "boolean" + } + } + } + ] + }, + "expectedCommandEvent": { + "type": "object", + "additionalProperties": false, + "maxProperties": 1, + "minProperties": 1, + "properties": { + "commandStartedEvent": { + "type": "object", + "additionalProperties": false, + "properties": { + "command": { + "type": "object" + }, + "commandName": { + "type": "string" + }, + "databaseName": { + "type": "string" + }, + "hasServiceId": { + "type": "boolean" + }, + "hasServerConnectionId": { + "type": "boolean" + } + } + }, + "commandSucceededEvent": { + "type": "object", + "additionalProperties": false, + "properties": { + "reply": { + "type": "object" + }, + "commandName": { + "type": "string" + }, + "databaseName": { + "type": "string" + }, + "hasServiceId": { + "type": "boolean" + }, + "hasServerConnectionId": { + "type": "boolean" + } + } + }, + "commandFailedEvent": { + "type": "object", + "additionalProperties": false, + "properties": { + "commandName": { + "type": "string" + }, + "databaseName": { + "type": "string" + }, + "hasServiceId": { + "type": "boolean" + }, + "hasServerConnectionId": { + "type": "boolean" + } + } + } + } + }, + "expectedCmapEvent": { + "type": "object", + "additionalProperties": false, + "maxProperties": 1, + "minProperties": 1, + "properties": { + "poolCreatedEvent": { + "type": "object", + "additionalProperties": false, + "properties": {} + }, + "poolReadyEvent": { + "type": "object", + "additionalProperties": false, + "properties": {} + }, + "poolClearedEvent": { + "type": "object", + "additionalProperties": false, + "properties": { + "hasServiceId": { + "type": "boolean" + }, + 
"interruptInUseConnections": { + "type": "boolean" + } + } + }, + "poolClosedEvent": { + "type": "object", + "additionalProperties": false, + "properties": {} + }, + "connectionCreatedEvent": { + "type": "object", + "additionalProperties": false, + "properties": {} + }, + "connectionReadyEvent": { + "type": "object", + "additionalProperties": false, + "properties": {} + }, + "connectionClosedEvent": { + "type": "object", + "additionalProperties": false, + "properties": { + "reason": { + "type": "string" + } + } + }, + "connectionCheckOutStartedEvent": { + "type": "object", + "additionalProperties": false, + "properties": {} + }, + "connectionCheckOutFailedEvent": { + "type": "object", + "additionalProperties": false, + "properties": { + "reason": { + "type": "string" + } + } + }, + "connectionCheckedOutEvent": { + "type": "object", + "additionalProperties": false, + "properties": {} + }, + "connectionCheckedInEvent": { + "type": "object", + "additionalProperties": false, + "properties": {} + } + } + }, + "expectedSdamEvent": { + "type": "object", + "additionalProperties": false, + "maxProperties": 1, + "minProperties": 1, + "properties": { + "serverDescriptionChangedEvent": { + "type": "object", + "additionalProperties": false, + "properties": { + "previousDescription": { + "$ref": "#/definitions/serverDescription" + }, + "newDescription": { + "$ref": "#/definitions/serverDescription" + } + } + }, + "topologyDescriptionChangedEvent": { + "type": "object", + "additionalProperties": false, + "properties": { + "previousDescription": { + "$ref": "#/definitions/topologyDescription" + }, + "newDescription": { + "$ref": "#/definitions/topologyDescription" + } + } + }, + "serverHeartbeatStartedEvent": { + "type": "object", + "additionalProperties": false, + "properties": { + "awaited": { + "type": "boolean" + } + } + }, + "serverHeartbeatSucceededEvent": { + "type": "object", + "additionalProperties": false, + "properties": { + "awaited": { + "type": "boolean" + } + } + }, 
+ "serverHeartbeatFailedEvent": { + "type": "object", + "additionalProperties": false, + "properties": { + "awaited": { + "type": "boolean" + } + } + }, + "topologyOpeningEvent": { + "type": "object", + "additionalProperties": false, + "properties": {} + }, + "topologyClosedEvent": { + "type": "object", + "additionalProperties": false, + "properties": {} + } + } + }, + "serverDescription": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "Standalone", + "Mongos", + "PossiblePrimary", + "RSPrimary", + "RSSecondary", + "RSOther", + "RSArbiter", + "RSGhost", + "LoadBalancer", + "Unknown" + ] + } + } + }, + "topologyDescription": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "Single", + "Unknown", + "ReplicaSetNoPrimary", + "ReplicaSetWithPrimary", + "Sharded", + "LoadBalanced" + ] + } + } + }, + "expectedLogMessagesForClient": { + "type": "object", + "additionalProperties": false, + "required": [ + "client", + "messages" + ], + "properties": { + "client": { + "type": "string" + }, + "messages": { + "type": "array", + "items": { + "$ref": "#/definitions/expectedLogMessage" + } + }, + "ignoreExtraMessages": { + "type": "boolean" + }, + "ignoreMessages": { + "type": "array", + "items": { + "$ref": "#/definitions/expectedLogMessage" + } + } + } + }, + "expectedLogMessage": { + "type": "object", + "additionalProperties": false, + "required": [ + "level", + "component", + "data" + ], + "properties": { + "level": { + "$ref": "#/definitions/logSeverityLevel" + }, + "component": { + "$ref": "#/definitions/logComponent" + }, + "data": { + "type": "object" + }, + "failureIsRedacted": { + "type": "boolean" + } + } + }, + "collectionOrDatabaseOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "readConcern": { + "type": "object" + }, + "readPreference": { + "type": "object" + }, + "writeConcern": { + "type": 
"object" + }, + "timeoutMS": { + "type": "integer" + } + } + }, + "serverApi": { + "type": "object", + "additionalProperties": false, + "required": [ + "version" + ], + "properties": { + "version": { + "type": "string" + }, + "strict": { + "type": "boolean" + }, + "deprecationErrors": { + "type": "boolean" + } + } + }, + "operation": { + "type": "object", + "additionalProperties": false, + "required": [ + "name", + "object" + ], + "properties": { + "name": { + "type": "string" + }, + "object": { + "type": "string" + }, + "arguments": { + "type": "object" + }, + "ignoreResultAndError": { + "type": "boolean" + }, + "expectError": { + "$ref": "#/definitions/expectedError" + }, + "expectResult": {}, + "saveResultAsEntity": { + "type": "string" + } + }, + "allOf": [ + { + "not": { + "required": [ + "expectError", + "expectResult" + ] + } + }, + { + "not": { + "required": [ + "expectError", + "saveResultAsEntity" + ] + } + }, + { + "not": { + "required": [ + "ignoreResultAndError", + "expectResult" + ] + } + }, + { + "not": { + "required": [ + "ignoreResultAndError", + "expectError" + ] + } + }, + { + "not": { + "required": [ + "ignoreResultAndError", + "saveResultAsEntity" + ] + } + } + ] + }, + "expectedError": { + "type": "object", + "additionalProperties": false, + "minProperties": 1, + "properties": { + "isError": { + "type": "boolean", + "const": true + }, + "isClientError": { + "type": "boolean" + }, + "isTimeoutError": { + "type": "boolean" + }, + "errorContains": { + "type": "string" + }, + "errorCode": { + "type": "integer" + }, + "errorCodeName": { + "type": "string" + }, + "errorLabelsContain": { + "type": "array", + "minItems": 1, + "items": { + "type": "string" + } + }, + "errorLabelsOmit": { + "type": "array", + "minItems": 1, + "items": { + "type": "string" + } + }, + "writeErrors": { + "type": "object" + }, + "writeConcernErrors": { + "type": "array", + "items": { + "type": "object" + } + }, + "errorResponse": { + "type": "object" + }, + "expectResult": 
{} + } + }, + "test": { + "type": "object", + "additionalProperties": false, + "required": [ + "description", + "operations" + ], + "properties": { + "description": { + "type": "string" + }, + "runOnRequirements": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/runOnRequirement" + } + }, + "skipReason": { + "type": "string" + }, + "operations": { + "type": "array", + "items": { + "$ref": "#/definitions/operation" + } + }, + "expectEvents": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/expectedEventsForClient" + } + }, + "expectLogMessages": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/expectedLogMessagesForClient" + } + }, + "outcome": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/collectionData" + } + } + } + } + } +} diff --git a/source/unified-test-format/tests/Makefile b/source/unified-test-format/tests/Makefile index 5c30f9a66d..a2b79e3f70 100644 --- a/source/unified-test-format/tests/Makefile +++ b/source/unified-test-format/tests/Makefile @@ -1,4 +1,4 @@ -SCHEMA=../schema-1.20.json +SCHEMA=../schema-1.21.json .PHONY: all invalid valid-fail valid-pass atlas-data-lake versioned-api load-balancers gridfs transactions transactions-convenient-api crud collection-management read-write-concern retryable-reads retryable-writes sessions command-logging-and-monitoring client-side-operations-timeout HAS_AJV diff --git a/source/unified-test-format/unified-test-format.md b/source/unified-test-format/unified-test-format.md index 17a7e5bf12..dba41fb34b 100644 --- a/source/unified-test-format/unified-test-format.md +++ b/source/unified-test-format/unified-test-format.md @@ -31,10 +31,10 @@ This test format can be used to define tests for the following specifications: - [GridFS](../gridfs/gridfs-spec.md) - [Retryable Reads](../retryable-reads/retryable-reads.md) - [Retryable Writes](../retryable-writes/retryable-writes.md) -- 
[Sessions](../sessions/driver-sessions.rst) +- [Sessions](../sessions/driver-sessions.md) - [Transactions](../transactions/transactions.md) - [Convenient API for Transactions](../transactions-convenient-api/transactions-convenient-api.rst) -- [Server Discovery and Monitoring](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst) +- [Server Discovery and Monitoring](../server-discovery-and-monitoring/server-discovery-and-monitoring.md) This is not an exhaustive list. Specifications that are known to not be supported by this format may be discussed under [Future Work](#future-work). @@ -188,7 +188,7 @@ Test runners MUST support the following types of entities: - ClientSession. See [entity_session](#entity_session) and [Session Operations](#session-operations). - GridFS Bucket. See [entity_bucket](#entity_bucket) and [Bucket Operations](#bucket-operations). - + - ChangeStream. Change stream entities are special in that they are not defined in [createEntities](#createentities) but are instead created by using [operation.saveResultAsEntity](#operation_saveResultAsEntity) with a @@ -223,17 +223,17 @@ Test runners MUST support the following types of entities: Tests SHOULD NOT utilize deprecated types (e.g. 0x0E: Symbol), since they may not be supported by all drivers and could yield runtime errors (e.g. while loading a test file with an Extended JSON parser). - + - Test runner thread. An entity representing a "thread" that can be used to concurrently execute operations. Thread entities MUST be able to run concurrently with the main test runner thread and other thread entities, but they do not have to be implemented as actual OS threads (e.g. they can be goroutines or async tasks). See [entity_thread_object](#entity_thread_object) for more information on how they are created. - + - TopologyDescription. 
An entity representing a client's - [TopologyDescription](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#topologydescription) at a + [TopologyDescription](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#topologydescription) at a certain point in time. These entities are not defined in [createEntities](#createentities) but are instead created via [recordTopologyDescription](#recordtopologydescription) test runner operations. @@ -262,7 +262,7 @@ The top-level fields of a test file are as follows: This SHOULD describe the common purpose of tests in this file and MAY refer to the filename (e.g. "updateOne-hint"). - + - `schemaVersion`: Required string. Version of this specification with which the test file complies. @@ -274,13 +274,13 @@ The top-level fields of a test file are as follows: specific patch versions since patch-level changes SHOULD NOT alter the structure of the test format (as previously noted in [Schema Version](#schema-version)). - + - `runOnRequirements`: Optional array of one or more [runOnRequirement](#runonrequirement) objects. List of server version and/or topology requirements for which the tests in this file can be run. If no requirements are met, the test runner MUST skip this test file. - + - `createEntities`: Optional array of one or more [entity](#entity) objects. List of entities (e.g. client, collection, session objects) that SHALL be created before each test case is executed. @@ -288,12 +288,12 @@ The top-level fields of a test file are as follows: Test files SHOULD define entities in dependency order, such that all referenced entities (e.g. client) are defined before any of their dependent entities (e.g. database, session). - + - `initialData`: Optional array of one or more [collectionData](#collectiondata) objects. Data that will exist in collections before each test case is executed. - + - `tests`: Required array of one or more [test](#test) objects. 
List of test cases to be executed independently of each other. @@ -354,7 +354,7 @@ The structure of this object is as follows: parameter, test runners MUST treat the comparison as not equal and skip the test. This includes errors that occur when fetching a single parameter using `getParameter`. - + - `auth`: Optional boolean. If true, the tests MUST only run if authentication is enabled. If false, tests MUST NOT run if authentication is enabled. If this field is omitted, there is no authentication requirement. @@ -393,7 +393,7 @@ on the entity's `id` key. This anchor will allow the unique name to be reference The structure of this object is as follows: - + - `client`: Optional object. Defines a MongoClient object. In addition to the configuration defined below, test runners for drivers that implement connection pooling MUST track the number of connections checked out at any given time for @@ -409,13 +409,13 @@ The structure of this object is as follows: - `uriOptions`: Optional object. Additional URI options to apply to the test suite's connection string that is used to create this client. Any keys in this object MUST override conflicting keys in the connection string. - Documentation for supported options may be found in the [URI Options](../uri-options/uri-options.rst) spec, with one + Documentation for supported options may be found in the [URI Options](../uri-options/uri-options.md) spec, with one notable exception: if `readPreferenceTags` is specified in this object, the key will map to an array of strings, each representing a tag set, since it is not feasible to define multiple `readPreferenceTags` keys in the object. Note also that when specifying `directConnection` as true, the connection string used to instantiate a client MUST only have a single seed and MUST NOT specify the `replicaSet` option. 
See the - [URI Options spec](../uri-options/uri-options.rst#directconnection-uri-option-with-multiple-seeds-or-srv-uri) for + [URI Options spec](../uri-options/uri-options.md#directconnection-uri-option-with-multiple-seeds-or-srv-uri) for more information. Any field in `uriOptions` may be a [$$placeholder](#placeholder) document and the test runner MUST support replacing @@ -428,7 +428,7 @@ The structure of this object is as follows: ENVIRONMENT: { $$placeholder: 1 } ``` - + - `useMultipleMongoses`: Optional boolean. If true and the topology is a sharded cluster, the test runner MUST assert that this MongoClient connects to multiple mongos hosts (e.g. by inspecting the connection string). If false and the @@ -454,7 +454,7 @@ The structure of this object is as follows: This option has no effect for topologies that are not sharded or load balanced. - + - `observeEvents`: Optional array of one or more strings. Types of events that can be observed for this client. Unspecified event types MUST be ignored by this client's event listeners and SHOULD NOT be included in @@ -483,7 +483,7 @@ The structure of this object is as follows: - [serverHeartbeatFailedEvent](#expectedEvent_serverHeartbeatFailedEvent) - [topologyDescriptionChangedEvent](#expectedEvent_topologyDescriptionChangedEvent) - + - `ignoreCommandMonitoringEvents`: Optional array of one or more strings. Command names for which the test runner MUST ignore any observed command monitoring events. The command(s) will be ignored in addition to `configureFailPoint` @@ -494,7 +494,7 @@ The structure of this object is as follows: Test files SHOULD NOT use this option unless one or more command monitoring events are specified in [observeEvents](#entity_client_observeEvents). - + - `observeSensitiveCommands`: Optional boolean. 
If true, events associated with sensitive commands (per the [Command Logging and Monitoring](../command-logging-and-monitoring/command-logging-and-monitoring.md#security) spec) @@ -503,7 +503,7 @@ The structure of this object is as follows: Authentication SHOULD be disabled when this property is true, i.e. [auth](#runOnRequirement_auth) should be false for each `runOnRequirement`. See [rationale_observeSensitiveCommands](#rationale_observeSensitiveCommands). - + - `storeEventsAsEntities`: Optional array of one or more [storeEventsAsEntity](#storeeventsasentity) objects. Each object denotes an entity name and one or more events to be collected and stored in that entity. See @@ -520,7 +520,7 @@ The structure of this object is as follows: events: [PoolCreatedEvent, ConnectionCreatedEvent, CommandStartedEvent] ``` - + - `observeLogMessages`: Optional object where the key names are log [components](../logging/logging.md#components) and the values are minimum [log severity levels](../logging/logging.md#log-severity-levels) indicating which components @@ -529,7 +529,7 @@ The structure of this object is as follows: collector(s) and SHOULD NOT be included in [test.expectLogMessages](#test_expectLogMessages) for this client. - `serverApi`: Optional [serverApi](#serverapi) object. - + - `clientEncryption`: Optional object. Defines a ClientEncryption object. @@ -576,7 +576,7 @@ The structure of this object is as follows: See the [Client-Side Encryption test README](../client-side-encryption/tests/README.md#credentials) for instructions to obtain test credentials. - + - `database`: Optional object. Defines a Database object. @@ -592,7 +592,7 @@ The structure of this object is as follows: `databaseName: &database0Name foo`). - `databaseOptions`: Optional [collectionOrDatabaseOptions](#collectionordatabaseoptions) object. - + - `collection`: Optional object. Defines a Collection object. 
@@ -608,7 +608,7 @@ The structure of this object is as follows: `collectionName: &collection0Name foo`). - `collectionOptions`: Optional [collectionOrDatabaseOptions](#collectionordatabaseoptions) object. - + - `session`: Optional object. Defines an explicit ClientSession object. @@ -622,18 +622,18 @@ The structure of this object is as follows: `client: *client0`). - `sessionOptions`: Optional object. Map of parameters to pass to - [MongoClient.startSession](../sessions/driver-sessions.rst#startsession) when creating the session. Supported - options are defined in the following specifications: + [MongoClient.startSession](../sessions/driver-sessions.md#startsession) when creating the session. Supported options + are defined in the following specifications: - [Causal Consistency](../causal-consistency/causal-consistency.md#sessionoptions-changes) - - [Snapshot Reads](../sessions/snapshot-sessions.rst#sessionoptions-changes) + - [Snapshot Reads](../sessions/snapshot-sessions.md#sessionoptions-changes) - [Transactions](../transactions/transactions.md#sessionoptions-changes) - [Client Side Operations Timeout](../client-side-operations-timeout/client-side-operations-timeout.md#sessions) When specifying TransactionOptions for `defaultTransactionOptions`, the transaction options MUST remain nested under `defaultTransactionOptions` and MUST NOT be flattened into `sessionOptions`. - + - `bucket`: Optional object. Defines a Bucket object, as defined in the [GridFS](../gridfs/gridfs-spec.md) spec. @@ -648,7 +648,7 @@ The structure of this object is as follows: defined in the [GridFS](../gridfs/gridfs-spec.md#configurable-gridfsbucket-class) specification. The `readConcern`, `readPreference`, and `writeConcern` options use the same structure as defined in [Common Options](#common-options). - + - `thread`: Optional object. Defines a test runner "thread". 
Once the "thread" has been created, it should be idle and waiting for operations to be dispatched to it later on by [runOnThread](#runonthread) operations. @@ -749,7 +749,7 @@ The structure of this object is as follows: This SHOULD describe the purpose of this test (e.g. "insertOne is retried"). - + - `runOnRequirements`: Optional array of one or more [runOnRequirement](#runonrequirement) objects. List of server version and/or topology requirements for which this test can be run. If specified, these requirements are evaluated @@ -761,17 +761,17 @@ The structure of this object is as follows: of requirements MUST be satisfied in order for a test to be executed and more permissive requirements at the test-level could be taken out of context on their own. - + - `skipReason`: Optional string. If set, the test will be skipped. The string SHOULD explain the reason for skipping the test (e.g. JIRA ticket). - + - `operations`: Required array of one or more [operation](#operation) objects. List of operations to be executed for the test case. - + - `expectEvents`: Optional array of one or more [expectedEventsForClient](#expectedeventsforclient) objects. For one or more clients, a list of events that are expected to be observed in a particular order. @@ -784,7 +784,7 @@ The structure of this object is as follows: [expectedEventsForClient](#expectedeventsforclient) objects with the `eventType` set to `cmap` for both would either be redundant (if the `events` arrays were identical) or likely to fail (if the `events` arrays differed). - + - `expectLogMessages`: Optional array of one or more [expectedLogMessagesForClient](#expectedlogmessagesforclient) objects. For one or more clients, a list of log messages that are expected to be observed in a particular order. @@ -797,7 +797,7 @@ The structure of this object is as follows: Tests SHOULD NOT specify multiple [expectedLogMessagesForClient](#expectedlogmessagesforclient) objects for a single client entity. 
- + - `outcome`: Optional array of one or more [collectionData](#collectiondata) objects. Data that is expected to exist in collections after each test case is executed. @@ -811,18 +811,18 @@ An operation to be executed as part of the test. The structure of this object is as follows: - + - `name`: Required string. Name of the operation (e.g. method) to perform on the object. - + - `object`: Required string. Name of the object on which to perform the operation. This SHOULD correspond to either an [entity](#entity) name (for [Entity Test Operations](#entity-test-operations)) or "testRunner" (for [Special Test Operations](#special-test-operations)). If the object is an entity, The YAML file SHOULD use an [alias node](https://yaml.org/spec/1.2/spec.html#id2786196) for its `id` field (e.g. `object: *collection0`). - + - `arguments`: Optional object. Map of parameter names and values for the operation. The structure of this object will vary based on the operation. See [Entity Test Operations](#entity-test-operations) and @@ -830,16 +830,16 @@ The structure of this object is as follows: The `session` parameter is handled specially (see [commonOptions_session](#commonOptions_session)). - + - `ignoreResultAndError`: Optional boolean. If true, both the error and result for the operation MUST be ignored. - This field is mutally exclusive with [expectResult](#operation_expectResult), [expectError](#operation_expectError), + This field is mutually exclusive with [expectResult](#operation_expectResult), [expectError](#operation_expectError), and [saveResultAsEntity](#operation_saveResultAsEntity). This field SHOULD NOT be used for [Special Test Operations](#special-test-operations) (i.e. `object: testRunner`). - + - `expectError`: Optional [expectedError](#expectederror) object. One or more assertions for an error expected to be raised by the operation. 
@@ -849,7 +849,7 @@ The structure of this object is as follows: This field SHOULD NOT be used for [Special Test Operations](#special-test-operations) (i.e. `object: testRunner`). - + - `expectResult`: Optional mixed type. A value corresponding to the expected result of the operation. This field may be a scalar value, a single document, or an array of values. Test runners MUST follow the rules in @@ -859,7 +859,7 @@ The structure of this object is as follows: This field SHOULD NOT be used for [Special Test Operations](#special-test-operations) (i.e. `object: testRunner`). - + - `saveResultAsEntity`: Optional string. If specified, the actual result returned by the operation (if any) will be saved with this name in the [Entity Map](#entity-map). The test runner MUST raise an error if the name is already in @@ -926,7 +926,21 @@ The structure of this object is as follows: to have. The test runner MUST assert that the error does not contain any of the specified labels (e.g. using the `hasErrorLabel` method). - +- `writeErrors`: Optional document. The write errors expected to be present in the error. The `writeErrors` document + contains numeric keys representing the index of the write that failed and `writeError` object values. The test runner + MUST assert that the error contains a `writeError` for each index present in `writeErrors` and MUST assert that the + `writeError`s match as root-level documents according to the rules in [Evaluating Matches](#evaluating-matches). The + test runner MUST assert that the error does not contain any additional `writeError`s. This field is only intended for + use with the [clientBulkWrite](#clientbulkwrite) operation. + +- `writeConcernErrors`: Optional array of one or more objects. An ordered list of write concern errors expected to be + present in the error. 
The test runner MUST assert that each `writeConcernError` in this list matches the + `writeConcernError` present at the same index in the error's list of `writeConcernError`s as a root-level document + according to the rules in [Evaluating Matches](#evaluating-matches). The test runner MUST assert that the error does + not contain any additional `writeConcernError`s. This field is only intended for use with the + [clientBulkWrite](#clientbulkwrite) operation. + + - `errorResponse`: Optional document. A value corresponding to the expected server response. The test runner MUST assert that the error includes a server response that matches this value as a root-level document according to the rules in @@ -937,7 +951,7 @@ The structure of this object is as follows: may not provide direct access to a single response. Tests SHOULD avoid using `errorResponse` for such operations if possible; otherwise, affected drivers SHOULD skip such tests if necessary. - + - `expectResult`: Optional mixed type. This field follows the same rules as [operation.expectResult](#operation_expectResult) and is only used in cases where the error includes a result (e.g. @@ -988,7 +1002,7 @@ The events allowed in an `expectedEvent` object depend on the value of `eventTyp The structure of this object is as follows: - + - `commandStartedEvent`: Optional object. Assertions for one or more [CommandStartedEvent](../command-logging-and-monitoring/command-logging-and-monitoring.md#api) fields. @@ -1004,7 +1018,7 @@ The structure of this object is as follows: - `hasServiceId`: Defined in [hasServiceId](#hasserviceid). - `hasServerConnectionId`: Defined in [hasServerConnectionId](#hasserverconnectionid). - + - `commandSucceededEvent`: Optional object. Assertions for one or more [CommandSucceededEvent](../command-logging-and-monitoring/command-logging-and-monitoring.md#api) fields. @@ -1020,7 +1034,7 @@ The structure of this object is as follows: - `hasServiceId`: Defined in [hasServiceId](#hasserviceid).
- `hasServerConnectionId`: Defined in [hasServerConnectionId](#hasserverconnectionid). - + - `commandFailedEvent`: Optional object. Assertions for one or more [CommandFailedEvent](../command-logging-and-monitoring/command-logging-and-monitoring.md#api) fields. @@ -1036,17 +1050,17 @@ The structure of this object is as follows: ##### expectedCmapEvent - + - `poolCreatedEvent`: Optional object. If present, this object MUST be an empty document as all fields in this event are non-deterministic. - + - `poolReadyEvent`: Optional object. If present, this object MUST be an empty document as all fields in this event are non-deterministic. - + - `poolClearedEvent`: Optional object. Assertions for one or more [PoolClearedEvent](../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md#events) fields. @@ -1057,22 +1071,22 @@ The structure of this object is as follows: - `interruptInUseConnections`: Optional boolean. If specified, test runners MUST assert that the field is set and matches this value. - + - `poolClosedEvent`: Optional object. If present, this object MUST be an empty document as all fields in this event are non-deterministic. - + - `connectionCreatedEvent`: Optional object. If present, this object MUST be an empty document as all fields in this event are non-deterministic. - + - `connectionReadyEvent`: Optional object. If present, this object MUST be an empty document as all fields in this event are non-deterministic. - + - `connectionClosedEvent`: Optional object. Assertions for one or more [ConnectionClosedEvent](../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md#events) fields. @@ -1082,12 +1096,12 @@ The structure of this object is as follows: - `reason`: Optional string. Test runners MUST assert that the reason in the published event matches this value. Valid values for this field are defined in the CMAP spec. - + - `connectionCheckOutStartedEvent`: Optional object. 
If present, this object MUST be an empty document as all fields in this event are non-deterministic. - + - `connectionCheckOutFailedEvent`: Optional object. Assertions for one or more [ConnectionCheckOutFailedEvent](../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md#events) @@ -1098,12 +1112,12 @@ The structure of this object is as follows: - `reason`: Optional string. Test runners MUST assert that the reason in the published event matches this value. Valid values for this field are defined in the CMAP spec. - + - `connectionCheckedOutEvent`: Optional object. If present, this object MUST be an empty document as all fields in this event are non-deterministic. - + - `connectionCheckedInEvent`: Optional object. If present, this object MUST be an empty document as all fields in this event are non-deterministic. @@ -1112,7 +1126,7 @@ The structure of this object is as follows: The structure of this object is as follows: - + - `serverDescriptionChangedEvent`: Optional object. Assertions for one or more [ServerDescriptionChangedEvent](../server-discovery-and-monitoring/server-discovery-and-monitoring-logging-and-monitoring.md#events) @@ -1130,10 +1144,10 @@ The structure of this object is as follows: - `type`: Optional string. The type of the server in the description. Test runners MUST assert that the type in the published event matches this value. See - [SDAM: ServerType](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#servertype) for a list of + [SDAM: ServerType](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#servertype) for a list of valid values. - + - `serverHeartbeatStartedEvent`: Optional object. Assertions for one or more [ServerHeartbeatStartedEvent](../server-discovery-and-monitoring/server-discovery-and-monitoring-logging-and-monitoring.md#events) @@ -1143,7 +1157,7 @@ The structure of this object is as follows: - `awaited`: Optional boolean. 
If specified, test runners MUST assert that the field is set and matches this value. - + - `serverHeartbeatSucceededEvent`: Optional object. Assertions for one or more [ServerHeartbeatSucceededEvent](../server-discovery-and-monitoring/server-discovery-and-monitoring-logging-and-monitoring.md#events) @@ -1153,7 +1167,7 @@ The structure of this object is as follows: - `awaited`: Optional boolean. If specified, test runners MUST assert that the field is set and matches this value. - + - `serverHeartbeatFailedEvent`: Optional object. Assertions for one or more [ServerHeartbeatFailedEvent](../server-discovery-and-monitoring/server-discovery-and-monitoring-logging-and-monitoring.md#events) @@ -1163,7 +1177,7 @@ The structure of this object is as follows: - `awaited`: Optional boolean. If specified, test runners MUST assert that the field is set and matches this value. - + - `topologyDescriptionChangedEvent`: Optional object. Assertions for one [TopologyDescriptionChangedEvent](../server-discovery-and-monitoring/server-discovery-and-monitoring-logging-and-monitoring.md#events) @@ -1184,7 +1198,7 @@ The structure of this object is as follows: - `type`: Optional string. The type of the topology in the description. Test runners MUST assert that the type in the published event matches this value. See - [SDAM: TopologyType](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#topologytype) for a + [SDAM: TopologyType](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#topologytype) for a list of valid values. Test runners SHOULD ignore any other fields present on the `previousDescription` and `newDescription` fields of the @@ -1287,23 +1301,23 @@ Comprehensive documentation for some of these types and their parameters may be - [Read and Write Concern](../read-write-concern/read-write-concern.rst). - [Server Selection: Read Preference](../server-selection/server-selection.md#read-preference). 
-- [Server Discovery and Monitoring: TopologyDescription](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#topologydescription). +- [Server Discovery and Monitoring: TopologyDescription](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#topologydescription). The structure of these common options is as follows: - + - `collectionName`: String. Collection name. The YAML file SHOULD use an [alias node](https://yaml.org/spec/1.2/spec.html#id2786196) for a collection entity's `collectionName` field (e.g. `collectionName: *collection0Name`). - + - `databaseName`: String. Database name. The YAML file SHOULD use an [alias node](https://yaml.org/spec/1.2/spec.html#id2786196) for a database entity's `databaseName` field (e.g. `databaseName: *database0Name`). - + - `readConcern`: Object. Map of parameters to construct a read concern. @@ -1311,7 +1325,7 @@ The structure of these common options is as follows: - `level`: Required string. - + - `readPreference`: Object. Map of parameters to construct a read preference. @@ -1322,19 +1336,19 @@ The structure of these common options is as follows: - `maxStalenessSeconds`: Optional integer. - `hedge`: Optional object. - + - `client`: String. Client entity name, which the test runner MUST resolve to a MongoClient object. The YAML file SHOULD use an [alias node](https://yaml.org/spec/1.2/spec.html#id2786196) for a client entity's `id` field (e.g. `client: *client0`). - + - `session`: String. Session entity name, which the test runner MUST resolve to a ClientSession object. The YAML file SHOULD use an [alias node](https://yaml.org/spec/1.2/spec.html#id2786196) for a session entity's `id` field (e.g. `session: *session0`). - + - `writeConcern`: Object. Map of parameters to construct a write concern. @@ -1366,7 +1380,10 @@ Entity operations correspond to an API method on a driver object. If [operation. method on that class. 
Test files SHALL use camelCase when referring to API methods and parameters, even if the defining specifications use -other forms (e.g. snake_case in GridFS). +other forms (e.g. snake_case in GridFS). Test files SHOULD use the exact API method names defined in specifications for +entity test operations. Test files MAY use a different descriptive name if a naming conflict occurs. For example, the +name "clientBulkWrite" is used for the client-level bulk write operation to differentiate it from the collection-level +bulk write operation. This spec does not provide exhaustive documentation for all possible API methods that may appear in a test; however, the following sections discuss all supported entities and their operations in some level of detail. Special handling for @@ -1425,7 +1442,7 @@ Because drivers do not consistently propagate errors encountered while closing a NOT specify any operations for a client entity or any entity descended from it following a `close` operation on it, as driver behavior when an operation is attempted on a closed client or one of its descendant objects is not consistent. - + #### createChangeStream @@ -1439,6 +1456,58 @@ the resulting change stream might be saved with [operation.saveResultAsEntity](# Test runners MUST NOT iterate the change stream when executing this operation and test files SHOULD NOT specify [operation.expectResult](#operation_expectResult) for this operation. +#### clientBulkWrite + +These considerations only apply to the `MongoClient.bulkWrite` method. See [bulkWrite](#bulkwrite) for special +considerations for `MongoCollection.bulkWrite`. + +The `models` parameter for `clientBulkWrite` is documented as a list of WriteModel interfaces. Each WriteModel +implementation (e.g. InsertOneModel) provides important context to the method, but that type information is not easily +expressed in YAML and JSON. 
To account for this, test files MUST nest each WriteModel object in a single-key object, +where the key identifies the request type (e.g. "insertOne") and its value is an object expressing the parameters, as in +the following example: + +``` +arguments: + models: + - insertOne: + document: { _id: 1, x: 1 } + - replaceOne: + filter: { _id: 2 } + replacement: { x: 2 } + upsert: true + - updateOne: + filter: { _id: 3 } + update: { $set: { x: 3 } } + upsert: true + - updateMany: + filter: { } + update: { $inc: { x: 1 } } + - deleteOne: + filter: { x: 2 } + - deleteMany: + filter: { x: { $gt: 2 } } + ordered: true +``` + +Because the `insertResults`, `updateResults`, and `deleteResults` may be absent or empty in the `BulkWriteResult` +returned from a summary-only bulk write, the `clientBulkWrite` operation SHOULD use the +[$$unsetOrMatches](#unsetormatches) operator for assertions on these fields when `verboseResults` is not set to true. +This also applies to result objects defined in the `expectedResult` field of [expectedError](#expectederror). + +The `BulkWriteException` thrown by `MongoClient.bulkWrite` contains an optional `error` field that stores a top-level +error that occurred during the bulk write. Test runners MUST inspect the contents of this field when making assertions +based on the contents of the `errorCode` and `errorContains` fields in [expectedError](#expectederror). + +`BulkWriteException` also contains `writeErrors` and `writeConcernErrors` fields that define the individual write errors +and write concern errors that occurred during the bulk write. Unified tests SHOULD use `writeErrors` and +`writeConcernErrors` in `expectedError` to assert on the contents of these fields. Test runners MUST NOT inspect the +contents of these fields when making assertions based on any other fields defined in `expectedError`. 
+ +While operations typically raise an error *or* return a result, the `MongoClient.bulkWrite` operation may report both +via the `partialResult` property of a `BulkWriteException`. In this case, the intermediary write result may be matched +with [expectedError_expectResult](#expectedError_expectResult). + #### watch This operation SHOULD NOT be used in test files. See [client_createChangeStream](#client_createChangeStream). @@ -1483,7 +1552,7 @@ below. When executing an `aggregate` operation, the test runner MUST fully iterate the result. This will ensure consistent behavior between drivers that eagerly create a server-side cursor and those that do so lazily when iteration begins. - + #### createChangeStream @@ -1576,7 +1645,7 @@ These operations and their arguments may be documented in the following specific Collection operations that require special handling or are not documented by an existing specification are described below. - + #### aggregate @@ -1585,6 +1654,9 @@ behavior between drivers that eagerly create a server-side cursor and those that #### bulkWrite +These considerations only apply to the `MongoCollection.bulkWrite` method. See [clientBulkWrite](#clientbulkwrite) for +special considerations for `MongoClient.bulkWrite`. + The `requests` parameter for `bulkWrite` is documented as a list of WriteModel interfaces. Each WriteModel implementation (e.g. InsertOneModel) provides important context to the method, but that type information is not easily expressed in YAML and JSON. To account for this, test files MUST nest each WriteModel object in a single-key object, @@ -1632,7 +1704,7 @@ BulkWriteException MAY translate the expected code name to a number (see: instead, but MUST raise an error if the comparison cannot be attempted (e.g. `code` is also not available, translation fails).
- + #### createChangeStream @@ -1646,7 +1718,7 @@ before the resulting change stream might be saved with [operation.saveResultAsEn Test runners MUST NOT iterate the change stream when executing this operation and test files SHOULD NOT specify [operation.expectResult](#operation_expectResult) for this operation. - + #### createFindCursor @@ -1728,7 +1800,7 @@ This operation SHOULD NOT be used in test files. See [collection_createChangeStr These operations and their arguments may be documented in the following specifications: - [Convenient API for Transactions](../transactions-convenient-api/transactions-convenient-api.rst) -- [Driver Sessions](../sessions/driver-sessions.rst) +- [Driver Sessions](../sessions/driver-sessions.md) Session operations that require special handling or are not documented by an existing specification are described below. @@ -1752,8 +1824,7 @@ These operations and their arguments may be documented in the following specific Bucket operations that require special handling or are not documented by an existing specification are described below. - - + #### download and downloadByName @@ -1772,14 +1843,14 @@ These operations SHOULD NOT be used in test files. See These operations SHOULD NOT be used in test files. See [download and downloadByName](#download-and-downloadbyname). - + #### openUploadStream and openUploadStreamWithId These operations SHOULD NOT be used in test files. See [IO operations for GridFS streams](#io-operations-for-gridfs-streams) in [Future Work](#future-work). - + #### upload and uploadWithId @@ -2175,7 +2246,7 @@ An example of this operation follows: Use a `listIndexes` command to check whether the index exists. Note that it is currently not possible to run `listIndexes` from within a transaction. - + #### createEntities @@ -2473,8 +2544,8 @@ The following arguments are supported: - `topologyDescription`: Required string. TopologyDescription entity whose TopologyType will be inspected. - `topologyType`: Required string. 
Expected TopologyType for the TopologyDescription. See - [SDAM: TopologyType](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#topologytype) for a list - of possible values. + [SDAM: TopologyType](../server-discovery-and-monitoring/server-discovery-and-monitoring.md#topologytype) for a list of + possible values. For example: @@ -2562,7 +2633,7 @@ Note: the test runner is not required to validate the type or value of a `$$plac ### Evaluating Matches Expected values in tests (e.g. [operation.expectResult](#operation_expectResult)) are expressed as either relaxed or -canonical [Extended JSON](../extended-json.rst). +canonical [Extended JSON](../extended-json.md). The algorithm for matching expected and actual values is specified with the following pseudo-code: @@ -2608,7 +2679,7 @@ The rules for comparing documents and arrays are discussed in more detail in sub *other* than documents and arrays, test runners MAY adopt any of the following approaches to compare expected and actual values, as long as they are consistent: -- Convert both values to relaxed or canonical [Extended JSON](../extended-json.rst) and compare strings +- Convert both values to relaxed or canonical [Extended JSON](../extended-json.md) and compare strings - Convert both values to BSON, and compare bytes - Convert both values to native representations, and compare accordingly @@ -2943,8 +3014,8 @@ tests. The instructions in this section apply for each test file loaded by the test runner. -Test files, which may be YAML or JSON files, MUST be interpreted using an [Extended JSON](../extended-json.rst) parser. -The parser MUST accept relaxed and canonical Extended JSON (per [Extended JSON: Parsers](../extended-json.rst#parsers)), +Test files, which may be YAML or JSON files, MUST be interpreted using an [Extended JSON](../extended-json.md) parser. 
+The parser MUST accept relaxed and canonical Extended JSON (per [Extended JSON: Parsers](../extended-json.md#parsers)), as test files may use either. Upon loading a file, the test runner MUST read the [schemaVersion](#schemaVersion) field and determine if the test file @@ -3055,7 +3126,7 @@ will be invoked at the end of each test and provided with the entity map (or an previously discussed in [Entity Map](#entity-map), test runners MAY restrict access to driver objects if necessary. Clear the entity map for this test. For each ClientSession in the entity map, the test runner MUST end the session (e.g. -call [endSession](../sessions/driver-sessions.rst#endsession)). For each ChangeStream and FindCursor in the entity map, +call [endSession](../sessions/driver-sessions.md#endsession)). For each ChangeStream and FindCursor in the entity map, the test runner MUST close the cursor. If the test started a transaction (i.e. executed a `startTransaction` or `withTransaction` operation), the test runner @@ -3343,7 +3414,7 @@ ignored in order to test the test runner implementation (e.g. defining entities The specification does prefer "MUST" in other contexts, such as discussing parts of the test file format that *are* enforceable by the JSON schema or the test runner implementation. - + ### Why can't `observeSensitiveCommands` be true when authentication is enabled? @@ -3417,6 +3488,10 @@ other specs *and* collating spec changes developed in parallel or during the sam ## Changelog +- 2024-05-08: **Schema version 1.21.**\ + Add `writeErrors` and `writeConcernErrors` field to `expectedError` for the + client-level bulk write API. + - 2024-04-15: Note that when `directConnection` is set to true test runners should only provide a single seed. 
- 2024-03-25: **Schema version 1.20.**\ diff --git a/source/uri-options/tests/README.md b/source/uri-options/tests/README.md new file mode 100644 index 0000000000..bda997051a --- /dev/null +++ b/source/uri-options/tests/README.md @@ -0,0 +1,43 @@ +# URI Options Tests + +The YAML and JSON files in this directory tree are platform-independent tests that drivers can use to prove their +conformance to the URI Options spec. + +These tests use the same format as the Connection String spec tests. + +## Version + +Files in the "specifications" repository have no version scheme. They are not tied to a MongoDB server version. + +## Format + +Each YAML file contains an object with a single `tests` key. This key is an array of test case objects, each of which +have the following keys: + +- `description`: A string describing the test. +- `uri`: A string containing the URI to be parsed. +- `valid`: A boolean indicating if the URI should be considered valid. +- `warning`: A boolean indicating whether URI parsing should emit a warning. +- `hosts`: Included for compatibility with the Connection String spec tests. This will always be `~`. +- `auth`: Included for compatibility with the Connection String spec tests. This will always be `~`. +- `options`: An object containing key/value pairs for each parsed query string option. + +If a test case includes a null value for one of these keys (e.g. `auth: ~`, `hosts: ~`), no assertion is necessary. This +both simplifies parsing of the test files (keys should always exist) and allows flexibility for drivers that might +substitute default values *during* parsing (e.g. omitted `hosts` could be parsed as `["localhost"]`). + +The `valid` and `warning` fields are boolean in order to keep the tests flexible. We are not concerned with asserting +the format of specific error or warnings messages strings. + +Under normal circumstances, it should not be necessary to specify both `valid: false` and `warning: true`. 
Typically, a +URI test case will either yield an error (e.g. options conflict) or a warning (e.g. invalid type or value for an +option), but not both. + +### Use as unit tests + +Testing whether a URI is valid or not requires testing whether URI parsing (or MongoClient construction) causes a +warning due to a URI option being invalid and asserting that the options parsed from the URI match those listed in the +`options` field. + +Note that there are tests for each of the options marked as optional; drivers will need to implement logic to skip over +the optional tests that they don't implement. diff --git a/source/uri-options/tests/README.rst b/source/uri-options/tests/README.rst deleted file mode 100644 index f6a128bba9..0000000000 --- a/source/uri-options/tests/README.rst +++ /dev/null @@ -1,54 +0,0 @@ -======================= -URI Options Tests -======================= - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the URI Options spec. - -These tests use the same format as the Connection String spec tests. - -Version -------- - -Files in the "specifications" repository have no version scheme. They are not -tied to a MongoDB server version. - -Format ------- - -Each YAML file contains an object with a single ``tests`` key. This key is an -array of test case objects, each of which have the following keys: - -- ``description``: A string describing the test. -- ``uri``: A string containing the URI to be parsed. -- ``valid``: A boolean indicating if the URI should be considered valid. -- ``warning``: A boolean indicating whether URI parsing should emit a warning. -- ``hosts``: Included for compatibility with the Connection String spec tests. This will always be ``~``. -- ``auth``: Included for compatibility with the Connection String spec tests. This will always be ``~``. -- ``options``: An object containing key/value pairs for each parsed query string - option. 
- -If a test case includes a null value for one of these keys (e.g. ``auth: ~``, -``hosts: ~``), no assertion is necessary. This both simplifies parsing of the -test files (keys should always exist) and allows flexibility for drivers that -might substitute default values *during* parsing (e.g. omitted ``hosts`` could be -parsed as ``["localhost"]``). - -The ``valid`` and ``warning`` fields are boolean in order to keep the tests -flexible. We are not concerned with asserting the format of specific error or -warnings messages strings. - -Under normal circumstances, it should not be necessary to specify both -``valid: false`` and ``warning: true``. Typically, a URI test case will either -yield an error (e.g. options conflict) or a warning (e.g. invalid type or value -for an option), but not both. - -Use as unit tests -================= - -Testing whether a URI is valid or not requires testing whether URI parsing (or -MongoClient construction) causes a warning due to a URI option being invalid and asserting that the -options parsed from the URI match those listed in the ``options`` field. - -Note that there are tests for each of the options marked as optional; drivers will need to implement -logic to skip over the optional tests that they don’t implement. diff --git a/source/uri-options/uri-options.md b/source/uri-options/uri-options.md new file mode 100644 index 0000000000..d5701f8137 --- /dev/null +++ b/source/uri-options/uri-options.md @@ -0,0 +1,220 @@ +# URI Options Specification + +- Status: Accepted + +- Minimum Server Version: N/A + +## Abstract + +Historically, URI options have been defined in individual specs, and drivers have defined any additional options +independently of one another. Because of the frustration due to there not being a single place where all of the URI +options are defined, this spec aims to do just that—namely, provide a canonical list of URI options that each driver +defines. 
+ +**THIS SPEC DOES NOT REQUIRE DRIVERS TO MAKE ANY BREAKING CHANGES.** + +## META + +The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and +"OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt). + +## Specification + +### Conflicting TLS options + +Per the [Connection String spec](../connection-string/connection-string-spec.md#repeated-keys), the behavior of +duplicates of most URI options is undefined. However, due to the security implications of certain options, drivers MUST +raise an error to the user during parsing if any of the following circumstances occur: + +1. Both `tlsInsecure` and `tlsAllowInvalidCertificates` appear in the URI options. +2. Both `tlsInsecure` and `tlsAllowInvalidHostnames` appear in the URI options. +3. Both `tlsInsecure` and `tlsDisableOCSPEndpointCheck` appear in the URI options. +4. Both `tlsInsecure` and `tlsDisableCertificateRevocationCheck` appear in the URI options. +5. Both `tlsAllowInvalidCertificates` and `tlsDisableOCSPEndpointCheck` appear in the URI options. +6. Both `tlsAllowInvalidCertificates` and `tlsDisableCertificateRevocationCheck` appear in the URI options. +7. Both `tlsDisableOCSPEndpointCheck` and `tlsDisableCertificateRevocationCheck` appear in the URI options. +8. All instances of `tls` and `ssl` in the URI options do not have the same value. If all instances of `tls` and `ssl` + have the same value, an error MUST NOT be raised. + +### directConnection URI option with multiple seeds or SRV URI + +The driver MUST report an error if the `directConnection=true` URI option is specified with multiple seeds. + +The driver MUST report an error if the `directConnection=true` URI option is specified with an SRV URI, because the URI +may resolve to multiple hosts. The driver MUST allow specifying `directConnection=false` URI option with an SRV URI. 
+ +### srvServiceName and srvMaxHosts URI options + +For URI option validation pertaining to `srvServiceName` and `srvMaxHosts`, please see the +[Initial DNS Seedlist Discovery spec](../initial-dns-seedlist-discovery/initial-dns-seedlist-discovery.md#uri-validation) +for details. + +### Load Balancer Mode + +For URI option validation in Load Balancer mode (i.e. `loadBalanced=true`), please see the +[Load Balancer spec](../load-balancers/load-balancers.md#uri-validation) for details. + +### SOCKS5 options + +For URI option validation pertaining to `proxyHost`, `proxyPort`, `proxyUsername` and `proxyPassword` please see the +[SOCKS5 support spec](../socks5-support/socks5.rst#mongoclient-configuration) for details. + +### List of specified options + +Each driver option below MUST be implemented in each driver unless marked as optional. If an option is marked as +optional, a driver MUST meet any conditions specified for leaving it out if it is not included. If a driver already +provides the option under a different name, the driver MAY implement the old and new names as aliases. All keys and +values MUST be encoded in UTF-8. All integer options are 32-bit unless specified otherwise. Note that all requirements +and recommendations described in the [Connection String spec](../connection-string/connection-string-spec.md) pertaining +to URI options apply here. + + + +| Name | Accepted Values | Default Value | Optional to implement? 
| Description | +| ------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| appname | any string that meets the criteria listed in the [handshake spec](../mongodb-handshake/handshake.rst#client-application-name) | no appname specified | no | Passed into the server in the client metadata as part of the connection handshake | +| authMechanism | any string; valid values are defined in the [auth spec](../auth/auth.md#supported-authentication-methods) | None; default values for authentication exist for constructing authentication credentials per the [auth spec](../auth/auth.md#supported-authentication-methods), but there is no default for the URI option itself. | no | The authentication mechanism method to use for connection to the server | +| authMechanismProperties | comma separated key:value pairs, e.g. "opt1:val1,opt2:val2" | no properties specified | no | Additional options provided for authentication (e.g. 
to enable hostname canonicalization for GSSAPI) | +| authSource | any string | None; default values for authentication exist for constructing authentication credentials per the [auth spec](../auth/auth.md#supported-authentication-methods), but there is no default for the URI option itself. | no | The database that connections should authenticate against | +| compressors | comma separated list of strings, e.g. "snappy,zlib" | defined in [compression spec](../compression/OP_COMPRESSED.md#compressors) | no | The list of allowed compression types for wire protocol messages sent or received from the server | +| connectTimeoutMS | non-negative integer; 0 means "no timeout" | 10,000 ms (unless a driver already has a different default) | no | Amount of time to wait for a single TCP socket connection to the server to be established before erroring; note that this applies to [SDAM hello and legacy hello operations](../mongodb-handshake/handshake.rst) | +| directConnection | "true" or "false" | defined in [SDAM spec](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#initial-topology-type) | no | Whether to connect to the deployment in Single topology. | +| heartbeatFrequencyMS | integer greater than or equal to 500 | defined in [SDAM spec](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#heartbeatfrequencyms) | no | the interval between regular server monitoring checks | +| journal | "true" or "false" | no "j" field specified | no | Default write concern "j" field for the client | +| loadBalanced | "true" or "false" | defined in [Load Balancer spec](../load-balancers/load-balancers.md#loadbalanced) | no | Whether the driver is connecting to a load balancer. | +| localThresholdMS | non-negative integer; 0 means 0 ms (i.e. 
the fastest eligible server must be selected) | defined in the [server selection spec](../server-selection/server-selection.md#localthresholdms) | no | The amount of time beyond the fastest round trip time that a given server’s round trip time can take and still be eligible for server selection | +| maxIdleTimeMS | non-negative integer; 0 means no minimum | defined in the [Connection Pooling spec](../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md#connection-pool-options-1) | required for drivers with connection pools | The amount of time a connection can be idle before it's closed | +| maxPoolSize | non-negative integer; 0 means no maximum | defined in the [Connection Pooling spec](../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md#connection-pool-options-1) | required for drivers with connection pools | The maximum number of clients or connections able to be created by a pool at a given time. This count includes connections which are currently checked out. | +| maxConnecting | positive integer | defined in the [Connection Pooling spec](../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md#connection-pool-options-1) | required for drivers with connection pools | The maximum number of Connections a Pool may be establishing concurrently. | +| maxStalenessSeconds | -1 (no max staleness check) or integer >= 90 | defined in [max staleness spec](../max-staleness/max-staleness.md#api) | no | The maximum replication lag, in wall clock time, that a secondary can suffer and still be eligible for server selection | +| minPoolSize | non-negative integer | defined in the [Connection Pooling spec](../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md#connection-pool-options-1) | required for drivers with connection pools | The number of connections the driver should create and maintain in the pool even when no operations are occurring. 
This count includes connections which are currently checked out. | +| proxyHost | any string | defined in the [SOCKS5 support spec](../socks5-support/socks5.rst#mongoclient-configuration) | no | The IPv4/IPv6 address or domain name of a SOCKS5 proxy server used for connecting to MongoDB services. | +| proxyPort | non-negative integer | defined in the [SOCKS5 support spec](../socks5-support/socks5.rst#mongoclient-configuration) | no | The port of the SOCKS5 proxy server specified in `proxyHost`. | +| proxyUsername | any string | defined in the [SOCKS5 support spec](../socks5-support/socks5.rst#mongoclient-configuration) | no | The username for username/password authentication to the SOCKS5 proxy server specified in `proxyHost`. | +| proxyPassword | any string | defined in the [SOCKS5 support spec](../socks5-support/socks5.rst#mongoclient-configuration) | no | The password for username/password authentication to the SOCKS5 proxy server specified in `proxyHost`. | +| readConcernLevel | any string ([to allow for forwards compatibility with the server](../read-write-concern/read-write-concern.rst#unknown-levels-and-additional-options-for-string-based-readconcerns)) | no read concern specified | no | Default read concern for the client | +| readPreference | any string; currently supported values are defined in the [server selection spec](../server-selection/server-selection.md#mode), but must be lowercase camelCase, e.g. "primaryPreferred" | defined in [server selection spec](../server-selection/server-selection.md#mode) | no | Default read preference for the client (excluding tags) | +| readPreferenceTags | comma-separated key:value pairs (e.g. "dc:ny,rack:1" and "dc:ny")

can be specified multiple times; each instance of this key is a separate tag set | no tags specified | no | Default read preference tags for the client; only valid if the read preference mode is not primary

The order of the tag sets in the read preference is the same as the order they are specified in the URI | +| replicaSet | any string | no replica set name provided | no | The name of the replica set to connect to | +| retryReads | "true" or "false" | defined in [retryable reads spec](../retryable-reads/retryable-reads.md#retryreads) | no | Enables retryable reads on server 3.6+ | +| retryWrites | "true" or "false" | defined in [retryable writes spec](../retryable-writes/retryable-writes.md#retrywrites) | no | Enables retryable writes on server 3.6+ | +| serverMonitoringMode | "stream", "poll", or "auto" | defined in [SDAM spec](../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#servermonitoringmode) | required for multi-threaded or asynchronous drivers | Configures which server monitoring protocol to use. | +| serverSelectionTimeoutMS | positive integer; a driver may also accept 0 to be used for a special case, provided that it documents the meaning | defined in [server selection spec](../server-selection/server-selection.md#serverselectiontimeoutms) | no | A timeout in milliseconds to block for server selection before raising an error | +| serverSelectionTryOnce | "true" or "false" | defined in [server selection spec](../server-selection/server-selection.md#serverselectiontryonce) | required for single-threaded drivers | Scan the topology only once after a server selection failure instead of repeatedly until the server selection times out | +| socketTimeoutMS | non-negative integer; 0 means no timeout | no timeout | no | NOTE: This option is deprecated in favor of [timeoutMS](../client-side-operations-timeout/client-side-operations-timeout.md#timeoutms)

Amount of time spent attempting to send or receive on a socket before timing out; note that this only applies to application operations, not SDAM. | +| srvMaxHosts | non-negative integer; 0 means no maximum | defined in the [Initial DNS Seedlist Discovery spec](../initial-dns-seedlist-discovery/initial-dns-seedlist-discovery.md#srvmaxhosts) | no | The maximum number of SRV results to randomly select when initially populating the seedlist or, during SRV polling, adding new hosts to the topology. | +| srvServiceName | a valid SRV service name according to [RFC 6335](https://datatracker.ietf.org/doc/html/rfc6335#section-5.1) | "mongodb" | no | the service name to use for SRV lookup in [initial DNS seedlist discovery](../initial-dns-seedlist-discovery/initial-dns-seedlist-discovery.md#srvservicename) and [SRV polling](../polling-srv-records-for-mongos-discovery/polling-srv-records-for-mongos-discovery.rst) | +| ssl | "true" or "false" | same as "tls" | no | alias of "tls"; required to ensure that Atlas connection strings continue to work | +| timeoutMS | non-negative integer; 0 or unset means no timeout | Defined in [Client Side Operations Timeout: timeoutMS](../client-side-operations-timeout/client-side-operations-timeout.md#timeoutms). | no | Time limit for the full execution of an operation | +| tls | "true" or "false" | TLS required if "mongodb+srv" scheme; otherwise, drivers may enable TLS by default if other "tls"-prefixed options are present

Drivers MUST clearly document the conditions under which TLS is enabled implicitly | no | Whether or not to require TLS for connections to the server | +| tlsAllowInvalidCertificates | "true" or "false" | error on invalid certificates | required if the driver’s language/runtime allows bypassing certificate verification | Specifies whether or not the driver should error when the server’s TLS certificate is invalid | +| tlsAllowInvalidHostnames | "true" or "false" | error on invalid hostnames | required if the driver’s language/runtime allows bypassing hostname verification | Specifies whether or not the driver should error when there is a mismatch between the server’s hostname and the hostname specified by the TLS certificate | +| tlsCAFile | any string | no certificate authorities specified | required if the driver's language/runtime allows non-global configuration | Path to file with either a single or bundle of certificate authorities to be considered trusted when making a TLS connection | +| tlsCertificateKeyFile | any string | no client certificate specified | required if the driver's language/runtime allows non-global configuration | Path to the client certificate file or the client private key file; in the case that they both are needed, the files should be concatenated | +| tlsCertificateKeyFilePassword | any string | no password specified | required if the driver's language/runtime allows non-global configuration | Password to decrypt the client private key to be used for TLS connections | +| tlsDisableCertificateRevocationCheck | "true" or "false" | false i.e. driver will check a certificate's revocation status | Yes | Controls whether or not the driver will check a certificate's revocation status via CRLs or OCSP. See the [OCSP Support Spec](../ocsp-support/ocsp-support.rst#tlsDisableCertificateRevocationCheck) for additional information. | +| tlsDisableOCSPEndpointCheck | "true" or "false" | false i.e.
driver will reach out to OCSP endpoints [if needed](../ocsp-support/ocsp-support.rst#id1). | Yes | Controls whether or not the driver will reach out to OCSP endpoints if needed. See the [OCSP Support Spec](../ocsp-support/ocsp-support.rst#tlsDisableOCSPEndpointCheck) for additional information. | +| tlsInsecure | "true" or "false" | No TLS constraints are relaxed | no | Relax TLS constraints as much as possible (e.g. allowing invalid certificates or hostname mismatches); drivers must document the exact constraints which are relaxed by this option being true | +| w | non-negative integer or string | no "w" value specified | no | Default write concern "w" field for the client | +| waitQueueTimeoutMS | positive number | defined in the [Connection Pooling spec](../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md#connection-pool-options-1) | required for drivers with connection pools, with exceptions described in the [Connection Pooling spec](../connection-monitoring-and-pooling/connection-monitoring-and-pooling.md#connection-pool-options-1) | NOTE: This option is deprecated in favor of [timeoutMS](../client-side-operations-timeout/client-side-operations-timeout.md#timeoutms)

Amount of time spent attempting to check out a connection from a server's connection pool before timing out | +| wTimeoutMS | non-negative 64-bit integer; 0 means no timeout | no timeout | no | NOTE: This option is deprecated in favor of [timeoutMS](../client-side-operations-timeout/client-side-operations-timeout.md#timeoutms)

Default write concern "wtimeout" field for the client | +| zlibCompressionLevel | integer between -1 and 9 (inclusive) | -1 (default compression level of the driver) | no | Specifies the level of compression when using zlib to compress wire protocol messages; -1 signifies the default level, 0 signifies no compression, 1 signifies the fastest speed, and 9 signifies the best compression | + +## Test Plan + +Tests are implemented and described in the [tests](tests/README.md) directory. + +## Design Rationale + +### Why allow drivers to provide the canonical names as aliases to existing options? + +First and foremost, this spec aims not to introduce any breaking changes to drivers. Forcing a driver to change the name +of an option that it provides will break any applications that use the old option. Moreover, it is already possible to +provide duplicate options in the URI by specifying the same option more than once; drivers can use the same semantics to +resolve the conflicts as they did before, whether it's raising an error, using the first option provided, using the last +option provided, or simply telling users that the behavior is not defined. + +### Why use "tls" as the prefix instead of "ssl" for related options? + +Technically speaking, drivers already only support TLS, which supersedes SSL. While SSL is commonly used in parlance to +refer to TLS connections, the fact remains that SSL is a weaker cryptographic protocol than TLS, and we want to +accurately reflect the strict requirements that drivers have in ensuring the security of a TLS connection. + +### Why use the names "tlsAllowInvalidHostnames" and "tlsAllowInvalidCertificates"? + +The "tls" prefix is used for the same reasons described above. The use of the terms "AllowInvalidHostnames" and +"AllowInvalidCertificates" is an intentional choice in order to convey the inherent unsafety of these options, which +should only be used for testing purposes. 
Additionally, both the server and the shell use "AllowInvalid" for their +equivalent options. + +### Why provide multiple implementation options for the insecure TLS options (i.e. "tlsInsecure" vs. "tlsAllowInvalidHostnames"/"tlsAllowInvalidCertificates"? + +Some TLS libraries (e.g. Go's standard library implementation) do not provide the ability to distinguish between allow +invalid certificates and hostnames, meaning they either both are allowed, or neither are. However, when more granular +options are available, it's better to expose these to the user to allow them to relax security constraints as little as +they need. + +### Why leave the decision up to drivers to enable TLS implicitly when TLS options are present? + +It can be useful to turn on TLS implicitly when options such as "tlsCAFile" are present and "tls" is not present. +However, with options such as "tlsAllowInvalidHostnames", some drivers may not have the ability to distinguish between +"false" being provided and the option not being specified. To keep the implicit enabling of TLS consistent between such +options, we defer the decision to enable TLS based on the presence of "tls"-prefixed options (besides "tls" itself) to +drivers. + +## Reference Implementations + +Ruby and Python + +## Security Implication + +Each of the "insecure" TLS options (i.e. "tlsInsecure", "tlsAllowInvalidHostnames", "tlsAllowInvalidCertificates", +"tlsDisableOCSPEndpointCheck", and "tlsDisableCertificateRevocationCheck") default to the more secure option when TLS is +enabled. In order to be backwards compatible with existing driver behavior, neither TLS nor authentication is enabled by +default. + +## Future Work + +This specification is intended to represent the current state of drivers URI options rather than be a static description +of the options at the time it was written. 
Whenever another specification is written or modified in a way that changes +the name or the semantics of a URI option or adds a new URI option, this specification MUST be updated to reflect those +changes. + +## Changelog + +- 2024-05-08: Migrated from reStructuredText to Markdown. + +- 2023-08-21: Add serverMonitoringMode option. + +- 2022-10-05: Remove spec front matter and reformat changelog. + +- 2022-01-19: Add the timeoutMS option and deprecate some existing timeout options + +- 2021-12-14: Add SOCKS5 options + +- 2021-11-08: Add maxConnecting option. + +- 2021-10-14: Add srvMaxHosts option. Merge headings discussing URI validation\ + for directConnection option. + +- 2021-09-15: Add srvServiceName option + +- 2021-09-13: Fix link to load balancer spec + +- 2021-04-15: Adding in behaviour for load balancer mode. + +- 2021-04-08: Updated to refer to hello and legacy hello + +- 2020-03-03: Add tlsDisableCertificateRevocationCheck option + +- 2020-02-26: Add tlsDisableOCSPEndpointCheck option + +- 2019-09-08: Add retryReads option + +- 2019-04-26: authSource and authMechanism have no default value + +- 2019-02-04: Specified errors for conflicting TLS-related URI options + +- 2019-01-25: Updated to reflect new Connection Monitoring and Pooling Spec + +______________________________________________________________________ diff --git a/source/uri-options/uri-options.rst b/source/uri-options/uri-options.rst index 8af993bc88..00c450c4a3 100644 --- a/source/uri-options/uri-options.rst +++ b/source/uri-options/uri-options.rst @@ -1,555 +1,4 @@ -========================= -URI Options Specification -========================= -:Status: Accepted -:Minimum Server Version: N/A - -**Abstract** ------------- - -Historically, URI options have been defined in individual specs, and -drivers have defined any additional options independently of one another. 
-Because of the frustration due to there not being a single place where -all of the URI options are defined, this spec aims to do just that—namely, -provide a canonical list of URI options that each driver defines. - -**THIS SPEC DOES NOT REQUIRE DRIVERS TO MAKE ANY BREAKING CHANGES.** - -**META** --------- - -The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", -"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this -document are to be interpreted as described in -`RFC 2119 `_. - -**Specification** ------------------ - -Conflicting TLS options -~~~~~~~~~~~~~~~~~~~~~~~ - -Per the `Connection String spec <../connection-string/connection-string-spec.md#repeated-keys>`__, -the behavior of duplicates of most URI options is undefined. However, due -to the security implications of certain options, drivers MUST raise an -error to the user during parsing if any of the following circumstances -occur: - -1. Both ``tlsInsecure`` and ``tlsAllowInvalidCertificates`` appear in the - URI options. -2. Both ``tlsInsecure`` and ``tlsAllowInvalidHostnames`` appear in the - URI options. -3. Both ``tlsInsecure`` and ``tlsDisableOCSPEndpointCheck`` appear in - the URI options. -4. Both ``tlsInsecure`` and ``tlsDisableCertificateRevocationCheck`` - appear in the URI options. -5. Both ``tlsAllowInvalidCertificates`` and - ``tlsDisableOCSPEndpointCheck`` appear in the URI options. -6. Both ``tlsAllowInvalidCertificates`` and - ``tlsDisableCertificateRevocationCheck`` appear in the URI options. -7. Both ``tlsDisableOCSPEndpointCheck`` and - ``tlsDisableCertificateRevocationCheck`` appear in the URI options. -8. All instances of ``tls`` and ``ssl`` in the URI options do not have the - same value. If all instances of ``tls`` and ``ssl`` have the same - value, an error MUST NOT be raised. 
- - -directConnection URI option with multiple seeds or SRV URI -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The driver MUST report an error if the ``directConnection=true`` URI option -is specified with multiple seeds. - -The driver MUST report an error if the ``directConnection=true`` URI option -is specified with an SRV URI, because the URI may resolve to multiple -hosts. The driver MUST allow specifying ``directConnection=false`` URI option -with an SRV URI. - - -srvServiceName and srvMaxHosts URI options -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -For URI option validation pertaining to ``srvServiceName`` and ``srvMaxHosts``, -please see the -`Initial DNS Seedlist Discovery spec <../initial-dns-seedlist-discovery/initial-dns-seedlist-discovery.md#uri-validation>`_ -for details. - - -Load Balancer Mode -~~~~~~~~~~~~~~~~~~ - -For URI option validation in Load Balancer mode (i.e. ``loadBalanced=true``), -please see the -`Load Balancer spec <../load-balancers/load-balancers.md#uri-validation>`_ for -details. - - -SOCKS5 options -~~~~~~~~~~~~~~ - -For URI option validation pertaining to ``proxyHost``, ``proxyPort``, -``proxyUsername`` and ``proxyPassword`` please see the -`SOCKS5 support spec`_ for details. - - -List of specified options -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Each driver option below MUST be implemented in each driver unless marked -as optional. If an option is marked as optional, a driver MUST meet any -conditions specified for leaving it out if it is not included. If a driver -already provides the option under a different name, the driver MAY -implement the old and new names as aliases. All keys and values MUST be -encoded in UTF-8. All integer options are 32-bit unless specified otherwise. -Note that all requirements and recommendations described in the `Connection -String spec -<../connection-string/connection-string-spec.md>`_ -pertaining to URI options apply here. - -.. _uri.options: - -.. 
list-table:: - :header-rows: 1 - :widths: 1 1 1 1 1 - - * - Name - - Accepted Values - - Default Value - - Optional to implement? - - Description - - * - appname - - any string that meets the criteria listed in the `handshake spec - `_ - - no appname specified - - no - - Passed into the server in the client metadata as part of the - connection handshake - - * - authMechanism - - any string; valid values are defined in the `auth spec - <../auth/auth.md#supported-authentication-methods>`_ - - None; default values for authentication exist for constructing authentication credentials per the - `auth spec <../auth/auth.md#supported-authentication-methods>`_, - but there is no default for the URI option itself. - - no - - The authentication mechanism method to use for connection to the - server - - * - authMechanismProperties - - comma separated key:value pairs, e.g. "opt1:val1,opt2:val2" - - no properties specified - - no - - Additional options provided for authentication (e.g. to enable hostname canonicalization for GSSAPI) - - * - authSource - - any string - - None; default values for authentication exist for constructing authentication credentials per the - `auth spec <../auth/auth.md#supported-authentication-methods>`_, - but there is no default for the URI option itself. - - no - - The database that connections should authenticate against - - * - compressors - - comma separated list of strings, e.g. 
"snappy,zlib" - - defined in `compression spec <../compression/OP_COMPRESSED.md#compressors>`_ - - no - - The list of allowed compression types for wire protocol messages - sent or received from the server - - * - connectTimeoutMS - - non-negative integer; 0 means "no timeout" - - 10,000 ms (unless a driver already has a different default) - - no - - Amount of time to wait for a single TCP socket connection to the - server to be established before erroring; note that this applies to - `SDAM hello and legacy hello operations `_ - - * - directConnection - - "true" or "false" - - defined in `SDAM spec `__ - - no - - Whether to connect to the deployment in Single topology. - - * - heartbeatFrequencyMS - - integer greater than or equal to 500 - - defined in `SDAM spec `__ - - no - - the interval between regular server monitoring checks - - * - journal - - "true" or "false" - - no "j" field specified - - no - - Default write concern "j" field for the client - - * - loadBalanced - - "true" or "false" - - defined in `Load Balancer spec <../load-balancers/load-balancers.md#loadbalanced>`__ - - no - - Whether the driver is connecting to a load balancer. - - * - localThresholdMS - - non-negative integer; 0 means 0 ms (i.e. 
the fastest eligible server - must be selected) - - defined in the `server selection spec <../server-selection/server-selection.md#localthresholdms>`__ - - no - - The amount of time beyond the fastest round trip time that a given - server’s round trip time can take and still be eligible for server selection - - * - maxIdleTimeMS - - non-negative integer; 0 means no minimum - - defined in the `Connection Pooling spec`_ - - required for drivers with connection pools - - The amount of time a connection can be idle before it's closed - - * - maxPoolSize - - non-negative integer; 0 means no maximum - - defined in the `Connection Pooling spec`_ - - required for drivers with connection pools - - The maximum number of clients or connections able to be created by a pool at a given time. This count includes connections which are currently checked out. - - * - maxConnecting - - positive integer - - defined in the `Connection Pooling spec`_ - - required for drivers with connection pools - - The maximum number of Connections a Pool may be establishing concurrently. - - * - maxStalenessSeconds - - -1 (no max staleness check) or integer >= 90 - - defined in `max staleness spec <../max-staleness/max-staleness.md#api>`_ - - no - - The maximum replication lag, in wall clock time, that a secondary can suffer and still be eligible for server selection - - * - minPoolSize - - non-negative integer - - defined in the `Connection Pooling spec`_ - - required for drivers with connection pools - - The number of connections the driver should create and maintain in the pool even when no operations are occurring. This count includes connections which are currently checked out. - - * - proxyHost - - any string - - defined in the `SOCKS5 support spec`_ - - no - - The IPv4/IPv6 address or domain name of a SOCKS5 proxy server used for connecting to MongoDB services. 
- - * - proxyPort - - non-negative integer - - defined in the `SOCKS5 support spec`_ - - no - - The port of the SOCKS5 proxy server specified in ``proxyHost``. - - * - proxyUsername - - any string - - defined in the `SOCKS5 support spec`_ - - no - - The username for username/password authentication to the SOCKS5 proxy server specified in ``proxyHost``. - - * - proxyPassword - - any string - - defined in the `SOCKS5 support spec`_ - - no - - The password for username/password authentication to the SOCKS5 proxy server specified in ``proxyHost``. - - * - readConcernLevel - - any string (`to allow for forwards compatibility with the server `_) - - no read concern specified - - no - - Default read concern for the client - - * - readPreference - - any string; currently supported values are defined in the `server selection spec <../server-selection/server-selection.md#mode>`__, but must be lowercase camelCase, e.g. "primaryPreferred" - - defined in `server selection spec <../server-selection/server-selection.md#mode>`__ - - no - - Default read preference for the client (excluding tags) - - * - readPreferenceTags - - comma-separated key:value pairs (e.g. 
"dc:ny,rack:1" and "dc:ny) - - can be specified multiple times; each instance of this key is a - separate tag set - - no tags specified - - no - - Default read preference tags for the client; only valid if the read preference mode is not primary - - The order of the tag sets in the read preference is the same as the order they are specified in the URI - - * - replicaSet - - any string - - no replica set name provided - - no - - The name of the replica set to connect to - - * - retryReads - - "true" or "false" - - defined in `retryable reads spec <../retryable-reads/retryable-reads.md#retryreads>`_ - - no - - Enables retryable reads on server 3.6+ - - * - retryWrites - - "true" or "false" - - defined in `retryable writes spec <../retryable-writes/retryable-writes.md#retrywrites>`_ - - no - - Enables retryable writes on server 3.6+ - - * - serverMonitoringMode - - "stream", "poll", or "auto" - - defined in `SDAM spec `__ - - required for multi-threaded or asynchronous drivers - - Configures which server monitoring protocol to use. 
- - * - serverSelectionTimeoutMS - - positive integer; a driver may also accept 0 to be used for a special case, provided that it documents the meaning - - defined in `server selection spec <../server-selection/server-selection.md#serverselectiontimeoutms>`__ - - no - - A timeout in milliseconds to block for server selection before raising an error - - * - serverSelectionTryOnce - - "true" or "false" - - defined in `server selection spec <../server-selection/server-selection.md#serverselectiontryonce>`__ - - required for single-threaded drivers - - Scan the topology only once after a server selection failure instead of repeatedly until the server selection times out - - * - socketTimeoutMS - - non-negative integer; 0 means no timeout - - no timeout - - no - - NOTE: This option is deprecated in favor of `timeoutMS <../client-side-operations-timeout/client-side-operations-timeout.md#timeoutms>`__ - - Amount of time spent attempting to send or receive on a socket before timing out; note that this only applies to application operations, not SDAM. - - * - srvMaxHosts - - non-negative integer; 0 means no maximum - - defined in the `Initial DNS Seedlist Discovery spec <../initial-dns-seedlist-discovery/initial-dns-seedlist-discovery.md#srvmaxhosts>`__ - - no - - The maximum number of SRV results to randomly select when initially - populating the seedlist or, during SRV polling, adding new hosts to the - topology. 
- - * - srvServiceName - - a valid SRV service name according to `RFC 6335 `_ - - "mongodb" - - no - - the service name to use for SRV lookup in `initial DNS seedlist discovery <../initial-dns-seedlist-discovery/initial-dns-seedlist-discovery.md#srvservicename>`__ - and `SRV polling <../polling-srv-records-for-mongos-discovery/polling-srv-records-for-mongos-discovery.rst>`_ - - * - ssl - - "true" or "false" - - same as "tls" - - no - - alias of "tls"; required to ensure that Atlas connection strings continue to work - - * - - timeoutMS - - non-negative integer; 0 or unset means no timeout - - Defined in `Client Side Operations Timeout: timeoutMS <../client-side-operations-timeout/client-side-operations-timeout.md#timeoutms>`_. - - no - - Time limit for the full execution of an operation - - * - tls - - "true" or "false" - - TLS required if "mongodb+srv" scheme; otherwise, drivers may may enable TLS by default if other "tls"-prefixed options are present - - - Drivers MUST clearly document the conditions under which TLS is enabled implicitly - - no - - Whether or not to require TLS for connections to the server - - - * - tlsAllowInvalidCertificates - - "true" or "false" - - error on invalid certificates - - required if the driver’s language/runtime allows bypassing hostname verification - - Specifies whether or not the driver should error when the server’s TLS certificate is invalid - - * - tlsAllowInvalidHostnames - - "true" or "false" - - error on invalid certificates - - required if the driver’s language/runtime allows bypassing hostname verification - - Specifies whether or not the driver should error when there is a mismatch between the server’s hostname and the hostname specified by the TLS certificate - - * - tlsCAFile - - any string - - no certificate authorities specified - - required if the driver's language/runtime allows non-global configuration - - Path to file with either a single or bundle of certificate authorities to be considered trusted when making 
a TLS connection - - * - tlsCertificateKeyFile - - any string - - no client certificate specified - - required if the driver's language/runtime allows non-global configuration - - Path to the client certificate file or the client private key file; in the case that they both are needed, the files should be concatenated - - * - tlsCertificateKeyFilePassword - - any string - - no password specified - - required if the driver's language/runtime allows non-global configuration - - Password to decrypt the client private key to be used for TLS connections - - * - tlsDisableCertificateRevocationCheck - - "true" or "false" - - false i.e. driver will check a certificate's revocation status - - Yes - - Controls whether or not the driver will check a certificate's - revocation status via CRLs or OCSP. See the `OCSP Support Spec - <../ocsp-support/ocsp-support.rst#tlsDisableCertificateRevocationCheck>`__ - for additional information. - - * - tlsDisableOCSPEndpointCheck - - "true" or "false" - - false i.e. driver will reach out to OCSP endpoints `if needed - <../ocsp-support/ocsp-support.rst#id1>`__. - - Yes - - Controls whether or not the driver will reach out to OCSP - endpoints if needed. See the `OCSP Support Spec - <../ocsp-support/ocsp-support.rst#tlsDisableOCSPEndpointCheck>`__ - for additional information. - - * - tlsInsecure - - "true" or "false" - - No TLS constraints are relaxed - - no - - Relax TLS constraints as much as possible (e.g. 
allowing invalid certificates or hostname mismatches); drivers must document the exact constraints which are relaxed by this option being true - - * - w - - non-negative integer or string - - no "w" value specified - - no - - Default write concern "w" field for the client - - * - waitQueueTimeoutMS - - positive number - - defined in the `Connection Pooling spec`_ - - required for drivers with connection pools, with exceptions described in the `Connection Pooling spec`_ - - NOTE: This option is deprecated in favor of `timeoutMS <../client-side-operations-timeout/client-side-operations-timeout.md#timeoutms>`_ - - Amount of time spent attempting to check out a connection from a server's - connection pool before timing out - - * - wTimeoutMS - - non-negative 64-bit integer; 0 means no timeout - - no timeout - - no - - NOTE: This option is deprecated in favor of `timeoutMS <../client-side-operations-timeout/client-side-operations-timeout.md#timeoutms>`_ - - Default write concern "wtimeout" field for the client - - * - zlibCompressionLevel - - integer between -1 and 9 (inclusive) - - -1 (default compression level of the driver) - - no - - Specifies the level of compression when using zlib to compress wire - protocol messages; -1 signifies the default level, 0 signifies no - compression, 1 signifies the fastest speed, and 9 signifies the - best compression - -**Test Plan** -------------- - -Tests are implemented and described in the `tests `_ directory - -**Design Rationale** ---------------------- - -Why allow drivers to provide the canonical names as aliases to existing options? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -First and foremost, this spec aims not to introduce any breaking changes -to drivers. Forcing a driver to change the name of an option that it -provides will break any applications that use the old option. 
Moreover, it -
is already possible to provide duplicate options in the URI by specifying -
the same option more than once; drivers can use the same semantics to -
resolve the conflicts as they did before, whether it’s raising an error, -
using the first option provided, using the last option provided, or simply -
telling users that the behavior is not defined. -
-
Why use "tls" as the prefix instead of "ssl" for related options? -
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -
-
Technically speaking, drivers already only support TLS, which supersedes -
SSL. While SSL is commonly used in parlance to refer to TLS connections, -
the fact remains that SSL is a weaker cryptographic protocol than TLS, and -
we want to accurately reflect the strict requirements that drivers have in -
ensuring the security of a TLS connection. -
-
Why use the names "tlsAllowInvalidHostnames" and "tlsAllowInvalidCertificates"? -
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -
-
The "tls" prefix is used for the same reasons described above. The use of the -
terms "AllowInvalidHostnames" and "AllowInvalidCertificates" is an intentional -
choice in order to convey the inherent unsafety of these options, which should -
only be used for testing purposes. Additionally, both the server and the shell -
use "AllowInvalid" for their equivalent options. -
-
Why provide multiple implementation options for the insecure TLS options (i.e. "tlsInsecure" vs. "tlsAllowInvalidHostnames"/"tlsAllowInvalidCertificates")? -
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -
-
Some TLS libraries (e.g. Go’s standard library implementation) do not provide -
the ability to distinguish between allowing invalid certificates and hostnames, -
meaning they either both are allowed, or neither are. 
However, when more -granular options are available, it’s better to expose these to the user to -allow them to relax security constraints as little as they need. - - -Why leave the decision up to drivers to enable TLS implicitly when TLS options are present? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It can be useful to turn on TLS implicitly when options such as "tlsCAFile" are -present and "tls" is not present. However, with options such as -"tlsAllowInvalidHostnames", some drivers may not have the ability to -distinguish between "false" being provided and the option not being specified. -To keep the implicit enabling of TLS consistent between such options, we defer -the decision to enable TLS based on the presence of "tls"-prefixed options -(besides "tls" itself) to drivers. - -**Reference Implementations** ------------------------------ - -Ruby and Python - -**Security Implication** ------------------------- - -Each of the "insecure" TLS options (i.e. "tlsInsecure", -"tlsAllowInvalidHostnames", "tlsAllowInvalidCertificates", -"tlsDisableOCSPEndpointCheck", and -"tlsDisableCertificateRevocationCheck") default to the more secure -option when TLS is enabled. In order to be backwards compatible with -existing driver behavior, neither TLS nor authentication is enabled by -default. - -**Future Work** ---------------- - -This specification is intended to represent the current state of drivers URI -options rather than be a static description of the options at the time it was -written. Whenever another specification is written or modified in a way that -changes the name or the semantics of a URI option or adds a new URI option, -this specification MUST be updated to reflect those changes. - -Changelog ---------- - -:2023-08-21: Add serverMonitoringMode option. -:2022-10-05: Remove spec front matter and reformat changelog. 
-:2022-01-19: Add the timeoutMS option and deprecate some existing timeout options -:2021-12-14: Add SOCKS5 options -:2021-11-08: Add maxConnecting option. -:2021-10-14: Add srvMaxHosts option. Merge headings discussing URI validation - for directConnection option. -:2021-09-15: Add srvServiceName option -:2021-09-13: Fix link to load balancer spec -:2021-04-15: Adding in behaviour for load balancer mode. -:2021-04-08: Updated to refer to hello and legacy hello -:2020-03-03: Add tlsDisableCertificateRevocationCheck option -:2020-02-26: Add tlsDisableOCSPEndpointCheck option -:2019-09-08: Add retryReads option -:2019-04-26: authSource and authMechanism have no default value -:2019-02-04: Specified errors for conflicting TLS-related URI options -:2019-01-25: Updated to reflect new Connection Monitoring and Pooling Spec - ----- - -.. _Connection Pooling spec: https://github.com/mongodb/specifications/blob/master/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.md#connection-pool-options-1 -.. _SOCKS5 support spec: https://github.com/mongodb/specifications/blob/master/source/socks5-support/socks5.rst#mongoclient-configuration +.. note:: + This specification has been converted to Markdown and renamed to + `uri-options.md `_. 
diff --git a/source/versioned-api/tests/crud-api-version-1.json b/source/versioned-api/tests/crud-api-version-1.json index a387d0587e..fe668620f8 100644 --- a/source/versioned-api/tests/crud-api-version-1.json +++ b/source/versioned-api/tests/crud-api-version-1.json @@ -50,7 +50,8 @@ }, "apiDeprecationErrors": true } - ] + ], + "namespace": "versioned-api-tests.test" }, "initialData": [ { @@ -426,6 +427,85 @@ } ] }, + { + "description": "client bulkWrite appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "versioned-api-tests.test", + "document": { + "_id": 6, + "x": 6 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 6 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 6, + "x": 6 + } + } + ], + "nsInfo": [ + { + "ns": "versioned-api-tests.test" + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, { "description": "countDocuments appends declared API version", "operations": [ diff --git a/source/versioned-api/tests/crud-api-version-1.yml b/source/versioned-api/tests/crud-api-version-1.yml index 50135c1458..cb9b45e57b 100644 --- a/source/versioned-api/tests/crud-api-version-1.yml +++ b/source/versioned-api/tests/crud-api-version-1.yml @@ -34,6 +34,7 @@ _yamlAnchors: apiVersion: "1" apiStrict: { $$unsetOrMatches: false } apiDeprecationErrors: 
true + namespace: &namespace "versioned-api-tests.test" initialData: - collectionName: *collectionName @@ -155,6 +156,46 @@ tests: multi: { $$unsetOrMatches: false } upsert: true <<: *expectedApiVersion + + - description: "client bulkWrite appends declared API version" + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0 + operations: + - name: clientBulkWrite + object: *client + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 6, x: 6 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 6 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 6, x: 6 } + nsInfo: + - { ns: *namespace } + <<: *expectedApiVersion - description: "countDocuments appends declared API version" operations: diff --git a/source/wireversion-featurelist.md b/source/wireversion-featurelist.md new file mode 100644 index 0000000000..ee31fd74a6 --- /dev/null +++ b/source/wireversion-featurelist.md @@ -0,0 +1,33 @@ +# Server Wire version and Feature List + +| Server version | Wire version | Feature List | +| -------------- | ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 2.6 | 1 |

Aggregation cursor

Auth commands

| +| 2.6 | 2 |

Write commands (insert/update/delete)

Aggregation $out pipeline operator

| +| 3.0 | 3 |

listCollections

listIndexes

SCRAM-SHA-1

explain command

| +| 3.2 | 4 |

(find/getMore/killCursors) commands

currentOp command

fsyncUnlock command

findAndModify take write concern

Commands take read concern

Document-level validation

explain command supports distinct and findAndModify

| +| 3.4 | 5 |

Commands take write concern

Commands take collation

| +| 3.6 | 6 |

Supports OP_MSG

Collection-level ChangeStream support

Retryable Writes

Causally Consistent Reads

Logical Sessions

update "arrayFilters" option

| +| 4.0 | 7 |

ReplicaSet transactions

Database and cluster-level change streams and startAtOperationTime option

| +| 4.2 | 8 |

Sharded transactions

Aggregation $merge pipeline operator

update "hint" option

| +| 4.4 | 9 |

Streaming protocol for SDAM

ResumableChangeStreamError error label

delete "hint" option

findAndModify "hint" option

createIndexes "commitQuorum" option

| +| 5.0 | 13 | $out and $merge on secondaries (technically FCV 4.4+) | +| 5.1 | 14 | | +| 5.2 | 15 | | +| 5.3 | 16 | | +| 6.0 | 17 |

Support for Partial Indexes

Sharded Time Series Collections

FCV set to 5.0

| +| 6.1 | 18 |

Update Perl Compatible Regular Expressions version to PCRE2

Add `*UCP` option for regex queries

| +| 6.2 | 19 |

Collection validation ensures BSON documents conform to BSON spec

Collection validation checks time series collections for internal consistency

| +| 7.0 | 21 |

Atlas Search Index Management

`$currentOp` aggregation Metrics

Compound Wildcard Indexes

Support large change stream events via `$changeStreamSplitLargeEvent` stage

`serverStatus` output gets new fields

Slot Based Query Execution

| +| 7.1 | 22 |

Improved Index Builds

Exhaust Cursors Enabled for Sharded Clusters

New Sharding Statistics for Chunk Migrations

Self-Managed Backups of Sharded Clusters | +| 7.2 | 23 |

Database Validation on `mongos` Aggregation Queries

`serverStatus` Metrics

Default Chunks Per Shard

| +| 7.3 | 24 |

Compaction Improvements

New `serverStatus` metrics

| +| 8.0 | 25 |

Range Encryption GA

OIDC authentication mechanism

New `bulkWrite` command

`snapshot` read concern on capped collections

| + +In server versions 5.0 and earlier, the wire version was defined as a numeric literal in +[src/mongo/db/wire_version.h](https://github.com/mongodb/mongo/blob/master/src/mongo/db/wire_version.h). Since server +version 5.1 ([SERVER-58346](https://jira.mongodb.org/browse/SERVER-58346)), the wire version is derived from the number +of releases since 4.0 (using +[src/mongo/util/version/releases.h.tpl](https://github.com/mongodb/mongo/blob/master/src/mongo/util/version/releases.h.tpl) +and +[src/mongo/util/version/releases.yml](https://github.com/mongodb/mongo/blob/master/src/mongo/util/version/releases.yml)). diff --git a/source/wireversion-featurelist.rst b/source/wireversion-featurelist.rst index 43521384fc..a7ebe4dece 100644 --- a/source/wireversion-featurelist.rst +++ b/source/wireversion-featurelist.rst @@ -1,88 +1,4 @@ -==================================== -Server Wire version and Feature List -==================================== -.. list-table:: - :header-rows: 1 - - * - Server version - - Wire version - - Feature List - - * - 2.6 - - 1 - - | Aggregation cursor - | Auth commands - - * - 2.6 - - 2 - - | Write commands (insert/update/delete) - | Aggregation $out pipeline operator - - * - 3.0 - - 3 - - | listCollections - | listIndexes - | SCRAM-SHA-1 - | explain command - - * - 3.2 - - 4 - - | (find/getMore/killCursors) commands - | currentOp command - | fsyncUnlock command - | findAndModify take write concern - | Commands take read concern - | Document-level validation - | explain command supports distinct and findAndModify - - * - 3.4 - - 5 - - | Commands take write concern - | Commands take collation - - * - 3.6 - - 6 - - | Supports OP_MSG - | Collection-level ChangeStream support - | Retryable Writes - | Causally Consistent Reads - | Logical Sessions - | update "arrayFilters" option - - * - 4.0 - - 7 - - | ReplicaSet transactions - | Database and cluster-level change streams and startAtOperationTime option - - * - 4.2 - - 8 - - | Sharded transactions - | 
Aggregation $merge pipeline operator - | update "hint" option - - * - 4.4 - - 9 - - | Streaming protocol for SDAM - | ResumableChangeStreamError error label - | delete "hint" option - | findAndModify "hint" option - | createIndexes "commitQuorum" option - - * - 5.0 - - 13 - - | $out and $merge on secondaries (technically FCV 4.4+) - - * - 5.1 - - 14 - - | - - * - 5.2 - - 15 - - | - - * - 5.3 - - 16 - - | - -In server versions 5.0 and earlier, the wire version was defined as a numeric literal in `src/mongo/db/wire_version.h `_. Since server version 5.1 (`SERVER-58346 `_), the wire version is derived from the number of releases since 4.0 (using `src/mongo/util/version/releases.h.tpl `_ and `src/mongo/util/version/releases.yml `_). +.. note:: + This specification has been converted to Markdown and renamed to + `wireversion-featurelist.md `_.