diff --git a/results.json b/results.json new file mode 100644 index 00000000000..3db8d7c1650 --- /dev/null +++ b/results.json @@ -0,0 +1,58 @@ +[ + { + "info": { + "test_name": "findManyAndToArrayOld", + "tags": [ + "js-bson" + ] + }, + "metrics": [ + { + "name": "megabytes_per_second", + "value": 88.05271598139723 + } + ] + }, + { + "info": { + "test_name": "findManyAndToArray", + "tags": [ + "js-bson" + ] + }, + "metrics": [ + { + "name": "megabytes_per_second", + "value": 89.03528881742793 + } + ] + }, + { + "info": { + "test_name": "findManyAndToArrayOldWithTransform", + "tags": [ + "js-bson" + ] + }, + "metrics": [ + { + "name": "megabytes_per_second", + "value": 86.97311297516816 + } + ] + }, + { + "info": { + "test_name": "findManyAndToArrayWithTransform", + "tags": [ + "js-bson" + ] + }, + "metrics": [ + { + "name": "megabytes_per_second", + "value": 89.91431914455787 + } + ] + } +] \ No newline at end of file diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index a9246398cd4..620bc70731f 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -457,10 +457,8 @@ export abstract class AbstractCursor< * results when this cursor had been previously accessed. In that case, * cursor.rewind() can be used to reset the cursor. 
 */ - async toArray(transform_temp?: (doc: TSchema) => any): Promise<TSchema[]> { + async toArray(): Promise<TSchema[]> { const array: TSchema[] = []; - - this.transform = transform_temp; // at the end of the loop (since readBufferedDocuments is called) the buffer will be empty // then, the 'await of' syntax will run a getMore call for await (const document of this) { diff --git a/test/benchmarks/driverBench/index.js b/test/benchmarks/driverBench/index.js index 0b1986455b9..b15b8708e32 100644 --- a/test/benchmarks/driverBench/index.js +++ b/test/benchmarks/driverBench/index.js @@ -10,7 +10,7 @@ let bsonType = 'js-bson'; const { inspect } = require('util'); const { writeFile } = require('fs/promises'); -const { makeParallelBenchmarks, makeSingleBench, makeMultiBench } = require('../mongoBench/suites'); +const { makeMultiBench } = require('../mongoBench/suites'); const hw = os.cpus(); const ram = os.totalmem() / 1024 ** 3; @@ -26,61 +26,12 @@ const systemInfo = () => ].join('\n'); console.log(systemInfo()); -function average(arr) { - return arr.reduce((x, y) => x + y, 0) / arr.length; -} - -const benchmarkRunner = new Runner() - .suite('singleBench', suite => makeSingleBench(suite)) - .suite('multiBench', suite => makeMultiBench(suite)) - .suite('parallel', suite => makeParallelBenchmarks(suite)); +const benchmarkRunner = new Runner().suite('multiBench', suite => makeMultiBench(suite)); benchmarkRunner .run() .then(microBench => { - const singleBench = average([ - microBench.singleBench.findOne, - microBench.singleBench.smallDocInsertOne, - microBench.singleBench.largeDocInsertOne - ]); - const multiBench = average(Object.values(microBench.multiBench)); - - const parallelBench = average([ - microBench.parallel.ldjsonMultiFileUpload, - microBench.parallel.ldjsonMultiFileExport, - microBench.parallel.gridfsMultiFileUpload, - microBench.parallel.gridfsMultiFileDownload - ]); - - const readBench = average([ - microBench.singleBench.findOne, - microBench.multiBench.findManyAndEmptyCursor, - 
microBench.multiBench.gridFsDownload, - microBench.parallel.gridfsMultiFileDownload, - microBench.parallel.ldjsonMultiFileExport - ]); - const writeBench = average([ - microBench.singleBench.smallDocInsertOne, - microBench.singleBench.largeDocInsertOne, - microBench.multiBench.smallDocBulkInsert, - microBench.multiBench.largeDocBulkInsert, - microBench.multiBench.gridFsUpload, - microBench.parallel.ldjsonMultiFileUpload, - microBench.parallel.gridfsMultiFileUpload - ]); - - const driverBench = average([readBench, writeBench]); - const benchmarkResults = { - singleBench, - multiBench, - parallelBench, - readBench, - writeBench, - driverBench, - ...microBench.parallel, - ...microBench.bsonBench, - ...microBench.singleBench, ...microBench.multiBench }; diff --git a/test/benchmarks/mongoBench/suites/multiBench.js b/test/benchmarks/mongoBench/suites/multiBench.js index ae1f921f948..3ec103424fa 100644 --- a/test/benchmarks/mongoBench/suites/multiBench.js +++ b/test/benchmarks/mongoBench/suites/multiBench.js @@ -1,7 +1,4 @@ -const { Readable } = require('stream'); -const { pipeline } = require('stream/promises'); const { - loadSpecFile, makeLoadJSON, makeClient, connectClient, @@ -9,32 +6,12 @@ const { dropDb, initCollection, makeLoadTweets, - disconnectClient, - makeLoadInsertDocs, - createCollection, - dropCollection, - dropBucket, - initBucket, - writeSingleByteFileToBucket + disconnectClient } = require('../../driverBench/common'); -function loadGridFs() { - this.bin = loadSpecFile(['single_and_multi_document', 'gridfs_large.bin']); -} - -function gridFsInitUploadStream() { - this.uploadStream = this.bucket.openUploadStream('gridfstest'); -} - -async function gridFsUpload() { - const uploadData = Readable.from(this.bin); - const uploadStream = this.uploadStream; - await pipeline(uploadData, uploadStream); -} - function makeMultiBench(suite) { return suite - .benchmark('findManyAndEmptyCursor', benchmark => + .benchmark('findManyAndToArrayOld', benchmark => benchmark 
.taskSize(16.22) .setup(makeLoadJSON('tweet.json')) @@ -45,102 +22,44 @@ function makeMultiBench(suite) { .setup(initCollection) .setup(makeLoadTweets(false)) .task(async function () { - // eslint-disable-next-line no-unused-vars - for await (const _ of this.collection.find({})) { - // do nothing - } + await this.collection.find({}).toArrayOld(); }) .teardown(dropDb) .teardown(disconnectClient) ) - .benchmark('smallDocBulkInsert', benchmark => + .benchmark('findManyAndToArray', benchmark => benchmark - .taskSize(2.75) - .setup(makeLoadJSON('small_doc.json')) - .setup(makeLoadInsertDocs(10000)) + .taskSize(16.22) + .setup(makeLoadJSON('tweet.json')) .setup(makeClient) .setup(connectClient) .setup(initDb) .setup(dropDb) - .setup(initDb) .setup(initCollection) - .setup(createCollection) - .beforeTask(dropCollection) - .beforeTask(createCollection) - .beforeTask(initCollection) - .task(async function () { - await this.collection.insertMany(this.docs, { ordered: true }); - }) - .teardown(dropDb) - .teardown(disconnectClient) - ) - .benchmark('largeDocBulkInsert', benchmark => - benchmark - .taskSize(27.31) - .setup(makeLoadJSON('large_doc.json')) - .setup(makeLoadInsertDocs(10)) - .setup(makeClient) - .setup(connectClient) - .setup(initDb) - .setup(dropDb) - .setup(initDb) - .setup(initCollection) - .setup(createCollection) - .beforeTask(dropCollection) - .beforeTask(createCollection) - .beforeTask(initCollection) + .setup(makeLoadTweets(false)) .task(async function () { - await this.collection.insertMany(this.docs, { ordered: true }); + await this.collection.find({}).toArray(); }) .teardown(dropDb) .teardown(disconnectClient) ) - .benchmark('gridFsUpload', benchmark => - benchmark - .taskSize(52.43) - .setup(loadGridFs) - .setup(makeClient) - .setup(connectClient) - .setup(initDb) - .setup(dropDb) - .setup(initDb) - .setup(initCollection) - .beforeTask(dropBucket) - .beforeTask(initBucket) - .beforeTask(gridFsInitUploadStream) - .beforeTask(writeSingleByteFileToBucket) 
- .task(gridFsUpload) - .teardown(dropDb) - .teardown(disconnectClient) - ) - .benchmark('gridFsDownload', benchmark => + .benchmark('findManyAndToArrayOldWithTransform', benchmark => benchmark - .taskSize(52.43) - .setup(loadGridFs) + .taskSize(16.22) + .setup(makeLoadJSON('tweet.json')) .setup(makeClient) .setup(connectClient) .setup(initDb) .setup(dropDb) - .setup(initDb) .setup(initCollection) - .setup(dropBucket) - .setup(initBucket) - .setup(gridFsInitUploadStream) - .setup(async function () { - await gridFsUpload.call(this); - this.id = this.uploadStream.id; - this.uploadData = undefined; - }) + .setup(makeLoadTweets(false)) .task(async function () { - // eslint-disable-next-line no-unused-vars - for await (const _ of this.bucket.openDownloadStream(this.id)) { - // do nothing - } + await this.collection.find({}).toArrayOld(doc => [doc, doc]); }) .teardown(dropDb) .teardown(disconnectClient) ) - .benchmark('findManyAndToArray', benchmark => + .benchmark('findManyAndToArrayWithTransform', benchmark => benchmark .taskSize(16.22) .setup(makeLoadJSON('tweet.json')) @@ -151,7 +70,7 @@ function makeMultiBench(suite) { .setup(initCollection) .setup(makeLoadTweets(false)) .task(async function () { - await this.collection.find({}).toArray(); + await this.collection.find({}).toArray(doc => [doc, doc]); }) .teardown(dropDb) .teardown(disconnectClient) diff --git a/test/tools/cluster_setup.sh b/test/tools/cluster_setup.sh index 65073216457..b9f62496292 100755 --- a/test/tools/cluster_setup.sh +++ b/test/tools/cluster_setup.sh @@ -21,8 +21,8 @@ elif [[ $1 == "sharded_cluster" ]]; then echo "mongodb://bob:pwd123@localhost:51000,localhost:51001" elif [[ $1 == "server" ]]; then mkdir -p $SINGLE_DIR - mlaunch init --dir $SINGLE_DIR --ipv6 --auth --username "bob" --password "pwd123" --single --setParameter enableTestCommands=1 - echo "mongodb://bob:pwd123@localhost:27017" + mlaunch init --dir $SINGLE_DIR --ipv6 --single --setParameter enableTestCommands=1 + echo 
"mongodb://localhost:27017" else echo "unsupported topology: $1" fi