From 65cd095b164d3bde0cb0a007d986eaa99c337035 Mon Sep 17 00:00:00 2001 From: Charlie Vieth Date: Thu, 6 Jul 2023 14:24:30 -0400 Subject: [PATCH 01/15] GODRIVER-2887 Remove use of reflect.Value.MethodByName in bson (#1308) --- bson/bsoncodec/default_value_decoders.go | 20 +++---- bson/bsoncodec/default_value_decoders_test.go | 11 +++- bson/bsoncodec/default_value_encoders.go | 56 +++++++++++-------- bson/mgocompat/setter_getter.go | 38 +++++++------ 4 files changed, 76 insertions(+), 49 deletions(-) diff --git a/bson/bsoncodec/default_value_decoders.go b/bson/bsoncodec/default_value_decoders.go index b5e22c498a..e479c3585b 100644 --- a/bson/bsoncodec/default_value_decoders.go +++ b/bson/bsoncodec/default_value_decoders.go @@ -1540,12 +1540,12 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr return err } - fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) + m, ok := val.Interface().(ValueUnmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} } - return nil + return m.UnmarshalBSONValue(t, src) } // UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. @@ -1588,12 +1588,12 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonr val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. 
} - fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) + m, ok := val.Interface().(Unmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} } - return nil + return m.UnmarshalBSON(src) } // EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. diff --git a/bson/bsoncodec/default_value_decoders_test.go b/bson/bsoncodec/default_value_decoders_test.go index d8c8389654..bac92e04f8 100644 --- a/bson/bsoncodec/default_value_decoders_test.go +++ b/bson/bsoncodec/default_value_decoders_test.go @@ -1530,13 +1530,22 @@ func TestDefaultValueDecoders(t *testing.T) { errors.New("copy error"), }, { - "Unmarshaler", + // Only the pointer form of testUnmarshaler implements Unmarshaler + "value does not implement Unmarshaler", testUnmarshaler{Val: bsoncore.AppendDouble(nil, 3.14159)}, nil, &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14159)}, bsonrwtest.ReadDouble, nil, }, + { + "Unmarshaler", + &testUnmarshaler{Val: bsoncore.AppendDouble(nil, 3.14159)}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14159)}, + bsonrwtest.ReadDouble, + nil, + }, }, }, { diff --git a/bson/bsoncodec/default_value_encoders.go b/bson/bsoncodec/default_value_encoders.go index 7d526c4ef8..4ab14a668c 100644 --- a/bson/bsoncodec/default_value_encoders.go +++ b/bson/bsoncodec/default_value_encoders.go @@ -564,12 +564,14 @@ func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bs return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} } - fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue") - returns := fn.Call(nil) - if !returns[2].IsNil() { - return 
returns[2].Interface().(error) + m, ok := val.Interface().(ValueMarshaler) + if !ok { + return vw.WriteNull() + } + t, data, err := m.MarshalBSONValue() + if err != nil { + return err } - t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte) return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) } @@ -593,12 +595,14 @@ func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw. return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} } - fn := val.Convert(tMarshaler).MethodByName("MarshalBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) + m, ok := val.Interface().(Marshaler) + if !ok { + return vw.WriteNull() + } + data, err := m.MarshalBSON() + if err != nil { + return err } - data := returns[0].Interface().([]byte) return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) } @@ -622,23 +626,31 @@ func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.Val return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} } - fn := val.Convert(tProxy).MethodByName("ProxyBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) + m, ok := val.Interface().(Proxy) + if !ok { + return vw.WriteNull() + } + v, err := m.ProxyBSON() + if err != nil { + return err + } + if v == nil { + encoder, err := ec.LookupEncoder(nil) + if err != nil { + return err + } + return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil)) } - data := returns[0] - var encoder ValueEncoder - var err error - if data.Elem().IsValid() { - encoder, err = ec.LookupEncoder(data.Elem().Type()) - } else { - encoder, err = ec.LookupEncoder(nil) + vv := reflect.ValueOf(v) + switch vv.Kind() { + case reflect.Ptr, reflect.Interface: + vv = vv.Elem() } + encoder, err := ec.LookupEncoder(vv.Type()) if err != nil { return err } - return 
encoder.EncodeValue(ec, vw, data.Elem()) + return encoder.EncodeValue(ec, vw, vv) } // JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. diff --git a/bson/mgocompat/setter_getter.go b/bson/mgocompat/setter_getter.go index 55af549d40..e9c9cae834 100644 --- a/bson/mgocompat/setter_getter.go +++ b/bson/mgocompat/setter_getter.go @@ -7,6 +7,7 @@ package mgocompat import ( + "errors" "reflect" "go.mongodb.org/mongo-driver/bson" @@ -73,16 +74,15 @@ func SetterDecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val ref return err } - fn := val.Convert(tSetter).MethodByName("SetBSON") - - errVal := fn.Call([]reflect.Value{reflect.ValueOf(bson.RawValue{Type: t, Value: src})})[0] - if !errVal.IsNil() { - err = errVal.Interface().(error) - if err == ErrSetZero { - val.Set(reflect.Zero(val.Type())) - return nil + m, ok := val.Interface().(Setter) + if !ok { + return bsoncodec.ValueDecoderError{Name: "SetterDecodeValue", Types: []reflect.Type{tSetter}, Received: val} + } + if err := m.SetBSON(bson.RawValue{Type: t, Value: src}); err != nil { + if !errors.Is(err, ErrSetZero) { + return err } - return err + val.Set(reflect.Zero(val.Type())) } return nil } @@ -104,17 +104,23 @@ func GetterEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val re return bsoncodec.ValueEncoderError{Name: "GetterEncodeValue", Types: []reflect.Type{tGetter}, Received: val} } - fn := val.Convert(tGetter).MethodByName("GetBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) + m, ok := val.Interface().(Getter) + if !ok { + return vw.WriteNull() + } + x, err := m.GetBSON() + if err != nil { + return err + } + if x == nil { + return vw.WriteNull() } - intermediate := returns[0] - encoder, err := ec.Registry.LookupEncoder(intermediate.Type()) + vv := reflect.ValueOf(x) + encoder, err := ec.Registry.LookupEncoder(vv.Type()) if err != nil { return err } - return encoder.EncodeValue(ec, vw, intermediate) + 
return encoder.EncodeValue(ec, vw, vv) } // isImplementationNil returns if val is a nil pointer and inter is implemented on a concrete type From 4f9d1de5cddde95aa42de5001128588972d8df74 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 10 Jul 2023 13:51:08 -0500 Subject: [PATCH 02/15] GODRIVER-2883 Handle boolean 'ok' responses (#1314) --- x/mongo/driver/auth/plain_test.go | 43 +++++++++++++++++++++++++++++++ x/mongo/driver/errors.go | 4 +++ 2 files changed, 47 insertions(+) diff --git a/x/mongo/driver/auth/plain_test.go b/x/mongo/driver/auth/plain_test.go index b5967c579c..baaf175d85 100644 --- a/x/mongo/driver/auth/plain_test.go +++ b/x/mongo/driver/auth/plain_test.go @@ -13,6 +13,7 @@ import ( "encoding/base64" + "go.mongodb.org/mongo-driver/internal/require" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" . "go.mongodb.org/mongo-driver/x/mongo/driver/auth" @@ -145,3 +146,45 @@ func TestPlainAuthenticator_Succeeds(t *testing.T) { ) compareResponses(t, <-c.Written, expectedCmd, "$external") } + +func TestPlainAuthenticator_SucceedsBoolean(t *testing.T) { + t.Parallel() + + authenticator := PlainAuthenticator{ + Username: "user", + Password: "pencil", + } + + resps := make(chan []byte, 1) + writeReplies(resps, bsoncore.BuildDocumentFromElements(nil, + bsoncore.AppendBooleanElement(nil, "ok", true), + bsoncore.AppendInt32Element(nil, "conversationId", 1), + bsoncore.AppendBinaryElement(nil, "payload", 0x00, []byte{}), + bsoncore.AppendBooleanElement(nil, "done", true), + )) + + desc := description.Server{ + WireVersion: &description.VersionRange{ + Max: 6, + }, + } + c := &drivertest.ChannelConn{ + Written: make(chan []byte, 1), + ReadResp: resps, + Desc: desc, + } + + err := authenticator.Auth(context.Background(), &Config{Description: desc, Connection: c}) + require.NoError(t, err, "Auth error") + require.Len(t, c.Written, 1, "expected 1 messages to be sent") + + payload, err := 
base64.StdEncoding.DecodeString("AHVzZXIAcGVuY2ls") + require.NoError(t, err, "DecodeString error") + + expectedCmd := bsoncore.BuildDocumentFromElements(nil, + bsoncore.AppendInt32Element(nil, "saslStart", 1), + bsoncore.AppendStringElement(nil, "mechanism", "PLAIN"), + bsoncore.AppendBinaryElement(nil, "payload", 0x00, payload), + ) + compareResponses(t, <-c.Written, expectedCmd, "$external") +} diff --git a/x/mongo/driver/errors.go b/x/mongo/driver/errors.go index cb56b84f50..df4192619a 100644 --- a/x/mongo/driver/errors.go +++ b/x/mongo/driver/errors.go @@ -392,6 +392,10 @@ func ExtractErrorFromServerResponse(doc bsoncore.Document) error { if elem.Value().Double() == 1 { ok = true } + case bson.TypeBoolean: + if elem.Value().Boolean() { + ok = true + } } case "errmsg": if str, okay := elem.Value().StringValueOK(); okay { From f8b88fc241a91c32455f61c57557921ba7d17d2d Mon Sep 17 00:00:00 2001 From: Preston Vasquez Date: Mon, 10 Jul 2023 15:46:10 -0600 Subject: [PATCH 03/15] GODRIVER-2827 Add prose test for change stream splitting (#1310) --- mongo/integration/change_stream_test.go | 89 +++++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/mongo/integration/change_stream_test.go b/mongo/integration/change_stream_test.go index bcdab4a3c3..08f1451be1 100644 --- a/mongo/integration/change_stream_test.go +++ b/mongo/integration/change_stream_test.go @@ -8,6 +8,7 @@ package integration import ( "context" + "strings" "sync" "testing" "time" @@ -721,6 +722,94 @@ func TestChangeStream_ReplicaSet(t *testing.T) { wg.Wait() }) + + splitLargeChangesCollOpts := options. + CreateCollection(). + SetChangeStreamPreAndPostImages(bson.M{"enabled": true}) + + splitLargeChangesOpts := mtOpts. + MinServerVersion("7.0.0"). + CreateClient(true). 
+ CollectionCreateOptions(splitLargeChangesCollOpts) + + mt.RunOpts("split large changes", splitLargeChangesOpts, func(mt *mtest.T) { + type idValue struct { + ID int32 `bson:"_id"` + Value string `bson:"value"` + } + + doc := idValue{ + ID: 1, + Value: "q" + strings.Repeat("q", 10*1024*1024), + } + + // Insert the document + _, err := mt.Coll.InsertOne(context.Background(), doc) + require.NoError(t, err, "failed to insert idValue") + + // Watch for change events + pipeline := mongo.Pipeline{ + {{"$changeStreamSplitLargeEvent", bson.D{}}}, + } + + opts := options.ChangeStream().SetFullDocument(options.Required) + + cs, err := mt.Coll.Watch(context.Background(), pipeline, opts) + require.NoError(t, err, "failed to watch collection") + + defer closeStream(cs) + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + defer wg.Done() + + filter := bson.D{{"_id", int32(1)}} + update := bson.D{{"$set", bson.D{{"value", "z" + strings.Repeat("q", 10*1024*1024)}}}} + + _, err := mt.Coll.UpdateOne(context.Background(), filter, update) + require.NoError(mt, err, "failed to update idValue") + }() + + nextCtx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) + t.Cleanup(cancel) + + type splitEvent struct { + Fragment int32 `bson:"fragment"` + Of int32 `bson:"of"` + } + + got := struct { + SplitEvent splitEvent `bson:"splitEvent"` + }{} + + cs.Next(nextCtx) + + err = cs.Decode(&got) + require.NoError(mt, err, "failed to decode first iteration") + + want := splitEvent{ + Fragment: 1, + Of: 2, + } + + assert.Equal(mt, want, got.SplitEvent, "expected and actual Decode results are different") + + cs.Next(nextCtx) + + err = cs.Decode(&got) + require.NoError(mt, err, "failed to decoded second iteration") + + want = splitEvent{ + Fragment: 2, + Of: 2, + } + + assert.Equal(mt, want, got.SplitEvent, "expected and actual decode results are different") + + wg.Wait() + }) } func closeStream(cs *mongo.ChangeStream) { From 4d0c9756f689a2b93ce817dba836771dc4724fa0 Mon 
Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 12 Jul 2023 12:43:19 -0500 Subject: [PATCH 04/15] GODRIVER-2873 Bump minServerVersion for CSFLE deterministic encryption spec test for explain command (#1315) --- testdata/client-side-encryption/legacy/explain.json | 2 +- testdata/client-side-encryption/legacy/explain.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/testdata/client-side-encryption/legacy/explain.json b/testdata/client-side-encryption/legacy/explain.json index 0e451e4818..8ca3b48d37 100644 --- a/testdata/client-side-encryption/legacy/explain.json +++ b/testdata/client-side-encryption/legacy/explain.json @@ -1,7 +1,7 @@ { "runOn": [ { - "minServerVersion": "4.1.10" + "minServerVersion": "7.0.0" } ], "database_name": "default", diff --git a/testdata/client-side-encryption/legacy/explain.yml b/testdata/client-side-encryption/legacy/explain.yml index c0dd9c57c0..325928a72e 100644 --- a/testdata/client-side-encryption/legacy/explain.yml +++ b/testdata/client-side-encryption/legacy/explain.yml @@ -1,5 +1,5 @@ runOn: - - minServerVersion: "4.1.10" + - minServerVersion: "7.0.0" database_name: &database_name "default" collection_name: &collection_name "default" From 5d98e7da2b303409e3f2341584f257a8e3c917fe Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 14 Jul 2023 17:24:05 -0500 Subject: [PATCH 05/15] GODRIVER-2858 Run a subset of tests on MacOS (#1318) --- .evergreen/run-tests.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 66cb9f394c..392531613a 100644 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -88,7 +88,12 @@ if [ "Windows_NT" = "$OS" ]; then fi if [ -z ${MAKEFILE_TARGET+x} ]; then - MAKEFILE_TARGET="evg-test" + if [ "$(uname -s)" = "Darwin" ]; then + # Run a subset of the tests on Darwin + MAKEFILE_TARGET="evg-test-load-balancers" + else + MAKEFILE_TARGET="evg-test" + fi fi AUTH=${AUTH} \ From 
995cb8d81a8dee33059a25ca5fc3306ce525a309 Mon Sep 17 00:00:00 2001 From: Matt Dale <9760375+matthewdale@users.noreply.github.com> Date: Fri, 14 Jul 2023 19:15:24 -0700 Subject: [PATCH 06/15] GODRIVER-2847 Improve error message if RunCommandCursor command doesn't return a cursor. (#1317) --- mongo/database.go | 4 ++++ mongo/integration/database_test.go | 2 +- x/mongo/driver/batch_cursor.go | 26 ++++++++++++++++++++------ 3 files changed, 25 insertions(+), 7 deletions(-) diff --git a/mongo/database.go b/mongo/database.go index 8dd0352aed..2fe53acf71 100644 --- a/mongo/database.go +++ b/mongo/database.go @@ -254,6 +254,10 @@ func (db *Database) RunCommandCursor(ctx context.Context, runCommand interface{} if err = op.Execute(ctx); err != nil { closeImplicitSession(sess) + if errors.Is(err, driver.ErrNoCursor) { + return nil, errors.New( + "database response does not contain a cursor; try using RunCommand instead") + } return nil, replaceErrors(err) } diff --git a/mongo/integration/database_test.go b/mongo/integration/database_test.go index aaa229eb97..6a87e88e8a 100644 --- a/mongo/integration/database_test.go +++ b/mongo/integration/database_test.go @@ -357,7 +357,7 @@ func TestDatabase(t *testing.T) { {"cursor", bson.D{}}, } pingCmd := bson.D{{"ping", 1}} - pingErr := errors.New("cursor should be an embedded document but is of BSON type invalid") + pingErr := errors.New("database response does not contain a cursor; try using RunCommand instead") testCases := []struct { name string diff --git a/x/mongo/driver/batch_cursor.go b/x/mongo/driver/batch_cursor.go index a3f21f96c2..96df52b5f4 100644 --- a/x/mongo/driver/batch_cursor.go +++ b/x/mongo/driver/batch_cursor.go @@ -20,6 +20,10 @@ import ( "go.mongodb.org/mongo-driver/x/mongo/driver/session" ) +// ErrNoCursor is returned by NewCursorResponse when the database response does +// not contain a cursor. 
+var ErrNoCursor = errors.New("database response does not contain a cursor") + // BatchCursor is a batch implementation of a cursor. It returns documents in entire batches instead // of one at a time. An individual document cursor can be built on top of this batch cursor. type BatchCursor struct { @@ -62,17 +66,27 @@ type CursorResponse struct { postBatchResumeToken bsoncore.Document } -// NewCursorResponse constructs a cursor response from the given response and server. This method -// can be used within the ProcessResponse method for an operation. +// NewCursorResponse constructs a cursor response from the given response and +// server. If the provided database response does not contain a cursor, it +// returns ErrNoCursor. +// +// NewCursorResponse can be used within the ProcessResponse method for an operation. func NewCursorResponse(info ResponseInfo) (CursorResponse, error) { response := info.ServerResponse - cur, ok := response.Lookup("cursor").DocumentOK() + cur, err := response.LookupErr("cursor") + if err == bsoncore.ErrElementNotFound { + return CursorResponse{}, ErrNoCursor + } + if err != nil { + return CursorResponse{}, fmt.Errorf("error getting cursor from database response: %w", err) + } + curDoc, ok := cur.DocumentOK() if !ok { - return CursorResponse{}, fmt.Errorf("cursor should be an embedded document but is of BSON type %s", response.Lookup("cursor").Type) + return CursorResponse{}, fmt.Errorf("cursor should be an embedded document but is BSON type %s", cur.Type) } - elems, err := cur.Elements() + elems, err := curDoc.Elements() if err != nil { - return CursorResponse{}, err + return CursorResponse{}, fmt.Errorf("error getting elements from cursor: %w", err) } curresp := CursorResponse{Server: info.Server, Desc: info.ConnectionDescription} From a38c3ac4c7e481aa62912cd27a1168aec5a1fdf8 Mon Sep 17 00:00:00 2001 From: Preston Vasquez Date: Mon, 17 Jul 2023 11:52:28 -0600 Subject: [PATCH 07/15] GODRIVER-2743 Expose an API to create a cursor from a 
command response (#1273) Co-authored-by: Matt Dale <9760375+matthewdale@users.noreply.github.com> --- internal/codecutil/encoding.go | 65 ++ internal/codecutil/encoding_test.go | 90 ++ mongo/batch_cursor.go | 17 +- mongo/change_stream.go | 6 +- mongo/collection.go | 6 + mongo/cursor.go | 17 + mongo/cursor_test.go | 5 +- mongo/database.go | 6 + mongo/errors.go | 12 + mongo/index_view.go | 3 + mongo/integration/client_test.go | 3 + .../unified/database_operation_execution.go | 161 +++- mongo/integration/unified/entity.go | 14 + mongo/integration/unified/operation.go | 4 + .../integration/unified/unified_spec_test.go | 1 + mongo/mongo.go | 30 +- mongo/mongo_test.go | 3 +- mongo/options/runcmdoptions.go | 4 +- .../runCursorCommand.json | 583 ++++++++++++ .../runCursorCommand.yml | 304 ++++++ testdata/crud/unified/find-comment.yml | 20 + testdata/run-command/runCommand.json | 635 +++++++++++++ testdata/run-command/runCommand.yml | 319 +++++++ testdata/run-command/runCursorCommand.json | 877 ++++++++++++++++++ testdata/run-command/runCursorCommand.yml | 391 ++++++++ .../valid-pass/entity-commandCursor.json | 278 ++++++ .../valid-pass/entity-commandCursor.yml | 115 +++ .../valid-pass/poc-sessions.yml | 4 +- x/mongo/driver/batch_cursor.go | 51 +- x/mongo/driver/batch_cursor_test.go | 41 + .../driver/list_collections_batch_cursor.go | 24 +- x/mongo/driver/operation.go | 12 +- x/mongo/driver/operation_test.go | 3 +- 33 files changed, 4060 insertions(+), 44 deletions(-) create mode 100644 internal/codecutil/encoding.go create mode 100644 internal/codecutil/encoding_test.go create mode 100644 testdata/client-side-operations-timeout/runCursorCommand.json create mode 100644 testdata/client-side-operations-timeout/runCursorCommand.yml create mode 100644 testdata/run-command/runCommand.json create mode 100644 testdata/run-command/runCommand.yml create mode 100644 testdata/run-command/runCursorCommand.json create mode 100644 testdata/run-command/runCursorCommand.yml create mode 
100644 testdata/unified-test-format/valid-pass/entity-commandCursor.json create mode 100644 testdata/unified-test-format/valid-pass/entity-commandCursor.yml diff --git a/internal/codecutil/encoding.go b/internal/codecutil/encoding.go new file mode 100644 index 0000000000..2aaf8f2719 --- /dev/null +++ b/internal/codecutil/encoding.go @@ -0,0 +1,65 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package codecutil + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var ErrNilValue = errors.New("value is nil") + +// MarshalError is returned when attempting to transform a value into a document +// results in an error. +type MarshalError struct { + Value interface{} + Err error +} + +// Error implements the error interface. +func (e MarshalError) Error() string { + return fmt.Sprintf("cannot transform type %s to a BSON Document: %v", + reflect.TypeOf(e.Value), e.Err) +} + +// EncoderFn is used to functionally construct an encoder for marshaling values. +type EncoderFn func(io.Writer) (*bson.Encoder, error) + +// MarshalValue will attempt to encode the value with the encoder returned by +// the encoder function. +func MarshalValue(val interface{}, encFn EncoderFn) (bsoncore.Value, error) { + // If the val is already a bsoncore.Value, then do nothing. + if bval, ok := val.(bsoncore.Value); ok { + return bval, nil + } + + if val == nil { + return bsoncore.Value{}, ErrNilValue + } + + buf := new(bytes.Buffer) + + enc, err := encFn(buf) + if err != nil { + return bsoncore.Value{}, err + } + + // Encode the value in a single-element document with an empty key. Use + // bsoncore to extract the first element and return the BSON value. 
+ err = enc.Encode(bson.D{{Key: "", Value: val}}) + if err != nil { + return bsoncore.Value{}, MarshalError{Value: val, Err: err} + } + + return bsoncore.Document(buf.Bytes()).Index(0).Value(), nil +} diff --git a/internal/codecutil/encoding_test.go b/internal/codecutil/encoding_test.go new file mode 100644 index 0000000000..9696048f71 --- /dev/null +++ b/internal/codecutil/encoding_test.go @@ -0,0 +1,90 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package codecutil + +import ( + "io" + "testing" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/internal/assert" + "go.mongodb.org/mongo-driver/internal/require" +) + +func testEncFn(t *testing.T) EncoderFn { + t.Helper() + + return func(w io.Writer) (*bson.Encoder, error) { + rw, err := bsonrw.NewBSONValueWriter(w) + require.NoError(t, err, "failed to construct BSONValue writer") + + enc, err := bson.NewEncoder(rw) + require.NoError(t, err, "failed to construct encoder") + + return enc, nil + } +} + +func TestMarshalValue(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + val interface{} + registry *bsoncodec.Registry + encFn EncoderFn + want string + wantErr error + }{ + { + name: "empty", + val: nil, + want: "", + wantErr: ErrNilValue, + encFn: testEncFn(t), + }, + { + name: "bson.D", + val: bson.D{{"foo", "bar"}}, + want: `{"foo": "bar"}`, + encFn: testEncFn(t), + }, + { + name: "map", + val: map[string]interface{}{"foo": "bar"}, + want: `{"foo": "bar"}`, + encFn: testEncFn(t), + }, + { + name: "struct", + val: struct{ Foo string }{Foo: "bar"}, + want: `{"foo": "bar"}`, + encFn: testEncFn(t), + }, + { + name: "non-document type", + val: "foo: bar", + 
want: `"foo: bar"`, + encFn: testEncFn(t), + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + value, err := MarshalValue(test.val, test.encFn) + + assert.Equal(t, test.wantErr, err, "expected and actual error do not match") + assert.Equal(t, test.want, value.String(), "expected and actual comments are different") + }) + } +} diff --git a/mongo/batch_cursor.go b/mongo/batch_cursor.go index da2e27bc68..51d59d0ffa 100644 --- a/mongo/batch_cursor.go +++ b/mongo/batch_cursor.go @@ -8,6 +8,7 @@ package mongo import ( "context" + "time" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" @@ -35,9 +36,21 @@ type batchCursor interface { // Close closes the cursor. Close(context.Context) error - // The SetBatchSize method is a modifier function used to adjust the - // batch size of the cursor that implements it. + // SetBatchSize is a modifier function used to adjust the batch size of + // the cursor that implements it. SetBatchSize(int32) + + // SetMaxTime will set the maximum amount of time the server will allow + // the operations to execute. The server will error if this field is set + // but the cursor is not configured with awaitData=true. + // + // The time.Duration value passed by this setter will be converted and + // rounded down to the nearest millisecond. + SetMaxTime(time.Duration) + + // SetComment will set a user-configurable comment that can be used to + // identify the operation in server logs. 
+ SetComment(interface{}) } // changeStreamCursor is the interface implemented by batch cursors that also provide the functionality for retrieving diff --git a/mongo/change_stream.go b/mongo/change_stream.go index 76fe86f000..3dc6ecf16c 100644 --- a/mongo/change_stream.go +++ b/mongo/change_stream.go @@ -107,6 +107,10 @@ func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline in ctx = context.Background() } + cursorOpts := config.client.createBaseCursorOptions() + + cursorOpts.MarshalValueEncoderFn = newEncoderFn(config.bsonOpts, config.registry) + cs := &ChangeStream{ client: config.client, bsonOpts: config.bsonOpts, @@ -117,7 +121,7 @@ func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline in description.ReadPrefSelector(config.readPreference), description.LatencySelector(config.client.localThreshold), }), - cursorOptions: config.client.createBaseCursorOptions(), + cursorOptions: cursorOpts, } cs.sess = sessionFromContext(ctx) diff --git a/mongo/collection.go b/mongo/collection.go index 1e696ded96..d4c9934013 100644 --- a/mongo/collection.go +++ b/mongo/collection.go @@ -841,8 +841,11 @@ func aggregate(a aggregateParams) (cur *Cursor, err error) { } ao := options.MergeAggregateOptions(a.opts...) + cursorOpts := a.client.createBaseCursorOptions() + cursorOpts.MarshalValueEncoderFn = newEncoderFn(a.bsonOpts, a.registry) + op := operation.NewAggregate(pipelineArr). Session(sess). WriteConcern(wc). 
@@ -1230,6 +1233,9 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, Timeout(coll.client.timeout).MaxTime(fo.MaxTime).Logger(coll.client.logger) cursorOpts := coll.client.createBaseCursorOptions() + + cursorOpts.MarshalValueEncoderFn = newEncoderFn(coll.bsonOpts, coll.registry) + if fo.AllowDiskUse != nil { op.AllowDiskUse(*fo.AllowDiskUse) } diff --git a/mongo/cursor.go b/mongo/cursor.go index 9b348cb46a..d2228ed9c4 100644 --- a/mongo/cursor.go +++ b/mongo/cursor.go @@ -12,6 +12,7 @@ import ( "fmt" "io" "reflect" + "time" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" @@ -389,6 +390,22 @@ func (c *Cursor) SetBatchSize(batchSize int32) { c.bc.SetBatchSize(batchSize) } +// SetMaxTime will set the maximum amount of time the server will allow the +// operations to execute. The server will error if this field is set but the +// cursor is not configured with awaitData=true. +// +// The time.Duration value passed by this setter will be converted and rounded +// down to the nearest millisecond. +func (c *Cursor) SetMaxTime(dur time.Duration) { + c.bc.SetMaxTime(dur) +} + +// SetComment will set a user-configurable comment that can be used to identify +// the operation in server logs. +func (c *Cursor) SetComment(comment interface{}) { + c.bc.SetComment(comment) +} + // BatchCursorFromCursor returns a driver.BatchCursor for the given Cursor. If there is no underlying // driver.BatchCursor, nil is returned. 
// SetBatchSize is a no-op: the test batch cursor serves pre-loaded batches and
// ignores sizing hints.
func (tbc *testBatchCursor) SetBatchSize(int32) {}

// SetComment is a no-op: comments are only meaningful to a real server.
func (tbc *testBatchCursor) SetComment(interface{}) {}

// SetMaxTime is a no-op: there is no server-side execution to bound.
func (tbc *testBatchCursor) SetMaxTime(time.Duration) {}
return MongocryptError{Code: me.Code, Message: me.Message} } + if errors.Is(err, codecutil.ErrNilValue) { + return ErrNilValue + } + + if marshalErr, ok := err.(codecutil.MarshalError); ok { + return MarshalError{ + Value: marshalErr.Value, + Err: marshalErr.Err, + } + } + return err } diff --git a/mongo/index_view.go b/mongo/index_view.go index 502de2f2f1..41a93a2145 100644 --- a/mongo/index_view.go +++ b/mongo/index_view.go @@ -94,6 +94,9 @@ func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOption Timeout(iv.coll.client.timeout) cursorOpts := iv.coll.client.createBaseCursorOptions() + + cursorOpts.MarshalValueEncoderFn = newEncoderFn(iv.coll.bsonOpts, iv.coll.registry) + lio := options.MergeListIndexesOptions(opts...) if lio.BatchSize != nil { op = op.BatchSize(*lio.BatchSize) diff --git a/mongo/integration/client_test.go b/mongo/integration/client_test.go index 267bd4aa7c..98639eec6c 100644 --- a/mongo/integration/client_test.go +++ b/mongo/integration/client_test.go @@ -502,6 +502,9 @@ func TestClient(t *testing.T) { // When connected directly, the primary read preference should be overwritten to primaryPreferred. evt := mt.GetStartedEvent() assert.Equal(mt, "find", evt.CommandName, "expected 'find' event, got '%s'", evt.CommandName) + + // A direct connection will result in a single topology, and so + // the default readPreference mode should be "primaryPrefered". 
modeVal, err := evt.Command.LookupErr("$readPreference", "mode") assert.Nil(mt, err, "expected command %s to include $readPreference", evt.Command) diff --git a/mongo/integration/unified/database_operation_execution.go b/mongo/integration/unified/database_operation_execution.go index fcf4305a12..9c2005c130 100644 --- a/mongo/integration/unified/database_operation_execution.go +++ b/mongo/integration/unified/database_operation_execution.go @@ -91,6 +91,12 @@ func executeCreateCollection(ctx context.Context, operation *operation) (*operat cco.SetChangeStreamPreAndPostImages(val.Document()) case "expireAfterSeconds": cco.SetExpireAfterSeconds(int64(val.Int32())) + case "capped": + cco.SetCapped(val.Boolean()) + case "size": + cco.SetSizeInBytes(val.AsInt64()) + case "max": + cco.SetMaxDocuments(val.AsInt64()) case "timeseries": tsElems, err := elem.Value().Document().Elements() if err != nil { @@ -129,7 +135,20 @@ func executeCreateCollection(ctx context.Context, operation *operation) (*operat } err = db.CreateCollection(ctx, collName, &cco) - return newErrorResult(err), nil + if err != nil { + return newErrorResult(err), nil + } + + if collID := operation.ResultEntityID; collID != nil { + collEntityOpts := newCollectionEntityOptions(*collID, operation.Object, collName, nil) + + err := entities(ctx).addCollectionEntity(collEntityOpts) + if err != nil { + return nil, fmt.Errorf("failed to save collection as entity: %w", err) + } + } + + return newEmptyResult(), nil } func executeDropCollection(ctx context.Context, operation *operation) (*operationResult, error) { @@ -253,3 +272,143 @@ func executeRunCommand(ctx context.Context, operation *operation) (*operationRes res, err := db.RunCommand(ctx, command, opts).DecodeBytes() return newDocumentResult(res, err), nil } + +// executeRunCursorCommand proxies the database's runCursorCommand method and +// supports the same arguments and options. 
+func executeRunCursorCommand(ctx context.Context, operation *operation) (*operationResult, error) { + db, err := entities(ctx).database(operation.Object) + if err != nil { + return nil, err + } + + var ( + batchSize int32 + command bson.Raw + comment bson.Raw + maxTime time.Duration + ) + + opts := options.RunCmd() + + elems, _ := operation.Arguments.Elements() + for _, elem := range elems { + key := elem.Key() + val := elem.Value() + + switch key { + case "batchSize": + batchSize = val.Int32() + case "command": + command = val.Document() + case "commandName": + // This is only necessary for languages that cannot + // preserve key order in the command document, so we can + // ignore it. + case "comment": + comment = val.Document() + case "maxTimeMS": + maxTime = time.Duration(val.AsInt64()) * time.Millisecond + case "cursorTimeout": + return nil, newSkipTestError("cursorTimeout not supported") + case "timeoutMode": + return nil, newSkipTestError("timeoutMode not supported") + default: + return nil, fmt.Errorf("unrecognized runCursorCommand option: %q", key) + } + } + + if command == nil { + return nil, newMissingArgumentError("command") + } + + cursor, err := db.RunCommandCursor(ctx, command, opts) + if err != nil { + return newErrorResult(err), nil + } + + if batchSize > 0 { + cursor.SetBatchSize(batchSize) + } + + if maxTime > 0 { + cursor.SetMaxTime(maxTime) + } + + if len(comment) > 0 { + cursor.SetComment(comment) + } + + // When executing the provided command, the test runner MUST fully + // iterate the cursor. This will ensure consistent behavior between + // drivers that eagerly create a server-side cursor and those that do + // so lazily when iteration begins. + var docs []bson.Raw + if err := cursor.All(ctx, &docs); err != nil { + return newErrorResult(err), nil + } + + return newCursorResult(docs), nil +} + +// executeCreateRunCursorCommand proxies the database's runCursorCommand method +// and supports the same arguments and options. 
+func executeCreateRunCursorCommand(ctx context.Context, operation *operation) (*operationResult, error) { + db, err := entities(ctx).database(operation.Object) + if err != nil { + return nil, err + } + + var ( + batchSize int32 + command bson.Raw + ) + + opts := options.RunCmd() + + elems, _ := operation.Arguments.Elements() + for _, elem := range elems { + key := elem.Key() + val := elem.Value() + + switch key { + case "batchSize": + batchSize = val.Int32() + case "command": + command = val.Document() + case "commandName": + // This is only necessary for languages that cannot + // preserve key order in the command document, so we can + // ignore it. + case "cursorType": + return nil, newSkipTestError("cursorType not supported") + case "timeoutMode": + return nil, newSkipTestError("timeoutMode not supported") + default: + return nil, fmt.Errorf("unrecognized createRunCursorCommand option: %q", key) + } + } + + if command == nil { + return nil, newMissingArgumentError("command") + } + + // Test runners MUST ensure that the server-side cursor is created (i.e. + // the command document has executed) as part of this operation. + cursor, err := db.RunCommandCursor(ctx, command, opts) + if err != nil { + return newErrorResult(err), nil + } + + if batchSize > 0 { + cursor.SetBatchSize(batchSize) + } + + if cursorID := operation.ResultEntityID; cursorID != nil { + err := entities(ctx).addCursorEntity(*cursorID, cursor) + if err != nil { + return nil, fmt.Errorf("failed to store result as cursor entity: %v", err) + } + } + + return newEmptyResult(), nil +} diff --git a/mongo/integration/unified/entity.go b/mongo/integration/unified/entity.go index f3de52bd06..7c916f0e01 100644 --- a/mongo/integration/unified/entity.go +++ b/mongo/integration/unified/entity.go @@ -84,6 +84,20 @@ type entityOptions struct { ClientEncryptionOpts *clientEncryptionOpts `bson:"clientEncryptionOpts"` } +// newCollectionEntityOptions constructs an entity options object for a +// collection. 
+func newCollectionEntityOptions(id string, databaseID string, collectionName string, + opts *dbOrCollectionOptions) *entityOptions { + options := &entityOptions{ + ID: id, + DatabaseID: databaseID, + CollectionName: collectionName, + CollectionOptions: opts, + } + + return options +} + type clientEncryptionOpts struct { KeyVaultClient string `bson:"keyVaultClient"` KeyVaultNamespace string `bson:"keyVaultNamespace"` diff --git a/mongo/integration/unified/operation.go b/mongo/integration/unified/operation.go index 4034543e14..66e9b2ed17 100644 --- a/mongo/integration/unified/operation.go +++ b/mongo/integration/unified/operation.go @@ -138,6 +138,10 @@ func (op *operation) run(ctx context.Context, loopDone <-chan struct{}) (*operat return executeListCollectionNames(ctx, op) case "runCommand": return executeRunCommand(ctx, op) + case "runCursorCommand": + return executeRunCursorCommand(ctx, op) + case "createCommandCursor": + return executeCreateRunCursorCommand(ctx, op) // Collection operations case "aggregate": diff --git a/mongo/integration/unified/unified_spec_test.go b/mongo/integration/unified/unified_spec_test.go index edfa481255..8d59c9afbd 100644 --- a/mongo/integration/unified/unified_spec_test.go +++ b/mongo/integration/unified/unified_spec_test.go @@ -29,6 +29,7 @@ var ( "client-side-encryption/unified", "client-side-operations-timeout", "gridfs", + "run-command", } failDirectories = []string{ "unified-test-format/valid-fail", diff --git a/mongo/mongo.go b/mongo/mongo.go index ded99e4e2b..393c5b7713 100644 --- a/mongo/mongo.go +++ b/mongo/mongo.go @@ -17,6 +17,7 @@ import ( "strconv" "strings" + "go.mongodb.org/mongo-driver/internal/codecutil" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -132,6 +133,14 @@ func getEncoder( return enc, nil } +// newEncoderFn will return a function for constructing an encoder based on the +// provided codec options. 
// newEncoderFn will return a function for constructing an encoder based on the
// provided codec options. The returned closure captures opts and registry and
// defers the actual encoder construction (and its validation errors) to call
// time.
func newEncoderFn(opts *options.BSONOptions, registry *bsoncodec.Registry) codecutil.EncoderFn {
	return func(w io.Writer) (*bson.Encoder, error) {
		return getEncoder(w, opts, registry)
	}
}

// marshalValue marshals val to a single BSON value using the provided BSON
// options and registry, delegating to codecutil.MarshalValue. Errors from the
// shared helper (codecutil.ErrNilValue, codecutil.MarshalError) are translated
// to this package's error types by replaceErrors at higher layers.
func marshalValue(
	val interface{},
	bsonOpts *options.BSONOptions,
	registry *bsoncodec.Registry,
) (bsoncore.Value, error) {
	return codecutil.MarshalValue(val, newEncoderFn(bsonOpts, registry))
}
diff --git a/mongo/mongo_test.go b/mongo/mongo_test.go index 74cf7594ca..fd3f1ce869 100644 --- a/mongo/mongo_test.go +++ b/mongo/mongo_test.go @@ -16,6 +16,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/internal/assert" + "go.mongodb.org/mongo-driver/internal/codecutil" "go.mongodb.org/mongo-driver/internal/require" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -513,7 +514,7 @@ func TestMarshalValue(t *testing.T) { { name: "nil document", value: nil, - wantErr: ErrNilValue, + wantErr: codecutil.ErrNilValue, }, { name: "value marshaler", diff --git a/mongo/options/runcmdoptions.go b/mongo/options/runcmdoptions.go index 8c3899e78f..b0cdec32ce 100644 --- a/mongo/options/runcmdoptions.go +++ b/mongo/options/runcmdoptions.go @@ -6,7 +6,9 @@ package options -import "go.mongodb.org/mongo-driver/mongo/readpref" +import ( + "go.mongodb.org/mongo-driver/mongo/readpref" +) // RunCmdOptions represents options that can be used to configure a RunCommand operation. 
type RunCmdOptions struct { diff --git a/testdata/client-side-operations-timeout/runCursorCommand.json b/testdata/client-side-operations-timeout/runCursorCommand.json new file mode 100644 index 0000000000..5fc0be3399 --- /dev/null +++ b/testdata/client-side-operations-timeout/runCursorCommand.json @@ -0,0 +1,583 @@ +{ + "description": "runCursorCommand", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "commandClient", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "commandDb", + "client": "commandClient", + "databaseName": "commandDb" + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "errors if timeoutMode is set without timeoutMS", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection" + }, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if timeoutMode is cursorLifetime and cursorType is tailableAwait", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + 
"commandName": "find", + "command": { + "find": "collection" + }, + "timeoutMode": "cursorLifetime", + "cursorType": "tailableAwait" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "timeoutMS": 100, + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "command": { + "find": "collection", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "maxTimeMS": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "Non=tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + 
"command": { + "find": "collection", + "batchSize": 2 + }, + "timeoutMode": "iteration", + "timeoutMS": 100, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "collection", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Tailable cursor iteration timeoutMS is refreshed for getMore - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "cappedCollection", + "batchSize": 1, + "tailable": true + }, + "timeoutMode": "iteration", + "timeoutMS": 100, + "batchSize": 1, + "cursorType": "tailable" + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": 
"iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "foo": "bar" + }, + { + "fizz": "buzz" + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "command": { + 
"find": "cappedCollection", + "tailable": true, + "awaitData": true + }, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection" + } + } + } + ] + } + ] + } + ] +} diff --git a/testdata/client-side-operations-timeout/runCursorCommand.yml b/testdata/client-side-operations-timeout/runCursorCommand.yml new file mode 100644 index 0000000000..16a648e028 --- /dev/null +++ b/testdata/client-side-operations-timeout/runCursorCommand.yml @@ -0,0 +1,304 @@ +description: runCursorCommand + +schemaVersion: '1.9' + +runOnRequirements: + - minServerVersion: "4.4" + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &commandClient commandClient + useMultipleMongoses: false + observeEvents: [commandStartedEvent, commandSucceededEvent] + - client: + id: &client client + useMultipleMongoses: false + observeEvents: [commandStartedEvent] + ignoreCommandMonitoringEvents: [killCursors] + - database: # For tests that need success event assertions + id: &commandDb commandDb + client: *commandClient + databaseName: *commandDb 
+ - database: + id: &db db + client: *client + databaseName: *db + - collection: + id: &collection collection + database: *db + collectionName: *collection + +initialData: + - collectionName: *collection + databaseName: *db + documents: &documents + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + - { _id: 5, x: 55 } + +tests: + - description: errors if timeoutMode is set without timeoutMS + operations: + - name: runCursorCommand + object: *db + arguments: + commandName: find + command: { find: *collection } + timeoutMode: cursorLifetime + expectError: + isClientError: true + + - description: error if timeoutMode is cursorLifetime and cursorType is tailableAwait + operations: + - name: runCursorCommand + object: *db + arguments: + commandName: find + command: { find: *collection } + timeoutMode: cursorLifetime + cursorType: tailableAwait + expectError: + isClientError: true + + # If timeoutMode is unset, it should default to CURSOR_LIFETIME and the time remaining after the find succeeds should be applied to the getMore + - description: Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset + runOnRequirements: + - serverless: forbid + operations: + # Block find/getMore for 15ms. + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: [find, getMore] + blockConnection: true + blockTimeMS: 60 + # Run a find with timeoutMS less than double our failPoint blockTimeMS and + # batchSize less than the total document count will cause a find and a getMore to be sent. + # Both will block for 60ms so together they will go over the timeout. 
+ - name: runCursorCommand + object: *db + arguments: + commandName: find + timeoutMS: 100 + command: { find: *collection, batchSize: 2 } + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + command: + find: *collection + maxTimeMS: { $$type: [int, long] } + - commandStartedEvent: + commandName: getMore + command: + getMore: { $$type: [int, long] } + collection: *collection + maxTimeMS: { $$exists: true } + + # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. Neither + # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=100 and "getMore" + # commands are blocked for 60ms, causing iteration to fail with a timeout error. + - description: Non=tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure + runOnRequirements: + - serverless: forbid + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["getMore"] + blockConnection: true + blockTimeMS: 60 + - name: runCursorCommand + object: *db + arguments: + commandName: find + command: { find: *collection, batchSize: 2 } + timeoutMode: iteration + timeoutMS: 100 + batchSize: 2 + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *db + command: + find: *collection + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: getMore + databaseName: *db + command: + getMore: { $$type: ["int", "long"] } + collection: *collection + maxTimeMS: { $$exists: false } + + # The timeoutMS option should apply separately to the initial "find" and each getMore. This is a failure test. 
The + # find inherits timeoutMS=100 from the collection and the getMore command blocks for 60ms, causing iteration to fail + # with a timeout error. + - description: Tailable cursor iteration timeoutMS is refreshed for getMore - failure + runOnRequirements: + - serverless: forbid + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["getMore"] + blockConnection: true + blockTimeMS: 60 + - name: dropCollection + object: *db + arguments: + collection: &cappedCollection cappedCollection + - name: createCollection + object: *db + arguments: + collection: *cappedCollection + capped: true + size: 4096 + max: 3 + saveResultAsEntity: *cappedCollection + - name: insertMany + object: *cappedCollection + arguments: + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - name: createCommandCursor + object: *db + arguments: + commandName: find + command: { find: *cappedCollection, batchSize: 1, tailable: true } + timeoutMode: iteration + timeoutMS: 100 + batchSize: 1 + cursorType: tailable + saveResultAsEntity: &tailableCursor tailableCursor + # Iterate the cursor twice: the first iteration will return the document from the batch in the find and the + # second will do a getMore. 
+ - name: iterateUntilDocumentOrError + object: *tailableCursor + - name: iterateUntilDocumentOrError + object: *tailableCursor + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: drop + - commandStartedEvent: + commandName: create + - commandStartedEvent: + commandName: insert + - commandStartedEvent: + commandName: find + databaseName: *db + command: + find: *cappedCollection + tailable: true + awaitData: { $$exists: false } + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: getMore + databaseName: *db + command: + getMore: { $$type: ["int", "long"] } + collection: *cappedCollection + maxTimeMS: { $$exists: false } + + # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=10 from + # the collection and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + - description: Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure + runOnRequirements: + - serverless: forbid + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["getMore"] + blockConnection: true + blockTimeMS: 60 + - name: dropCollection + object: *db + arguments: + collection: &cappedCollection cappedCollection + - name: createCollection + object: *db + arguments: + collection: *cappedCollection + capped: true + size: 4096 + max: 3 + saveResultAsEntity: *cappedCollection + - name: insertMany + object: *cappedCollection + arguments: + documents: [ { foo: bar }, { fizz: buzz } ] + - name: createCommandCursor + object: *db + arguments: + command: { find: *cappedCollection, tailable: true, awaitData: true } + cursorType: tailableAwait + batchSize: 1 + saveResultAsEntity: &tailableCursor tailableCursor + # Iterate twice to force a getMore. 
+ - name: iterateUntilDocumentOrError + object: *tailableCursor + - name: iterateUntilDocumentOrError + object: *tailableCursor + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: drop + - commandStartedEvent: + commandName: create + - commandStartedEvent: + commandName: insert + - commandStartedEvent: + commandName: find + databaseName: *db + command: + find: *cappedCollection + tailable: true + awaitData: true + maxTimeMS: { $$exists: true } + - commandStartedEvent: + commandName: getMore + databaseName: *db + command: + getMore: { $$type: ["int", "long"] } + collection: *cappedCollection diff --git a/testdata/crud/unified/find-comment.yml b/testdata/crud/unified/find-comment.yml index 0ab1aa8b3a..905241ad0e 100644 --- a/testdata/crud/unified/find-comment.yml +++ b/testdata/crud/unified/find-comment.yml @@ -71,6 +71,26 @@ tests: runOnRequirements: - maxServerVersion: "4.2.99" minServerVersion: "3.6" + operations: + - name: find + object: *collection0 + arguments: + filter: *filter + comment: *comment + expectError: + isClientError: false + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + comment: *comment + + - description: "find with comment sets comment on getMore" + runOnRequirements: + - minServerVersion: "4.4.0" operations: - name: find object: *collection0 diff --git a/testdata/run-command/runCommand.json b/testdata/run-command/runCommand.json new file mode 100644 index 0000000000..007e514bd7 --- /dev/null +++ b/testdata/run-command/runCommand.json @@ -0,0 +1,635 @@ +{ + "description": "runCommand", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + 
"database": "db", + "collectionName": "collection" + } + }, + { + "database": { + "id": "dbWithRC", + "client": "client", + "databaseName": "dbWithRC", + "databaseOptions": { + "readConcern": { + "level": "local" + } + } + } + }, + { + "database": { + "id": "dbWithWC", + "client": "client", + "databaseName": "dbWithWC", + "databaseOptions": { + "writeConcern": { + "w": 0 + } + } + } + }, + { + "session": { + "id": "session", + "client": "client" + } + }, + { + "client": { + "id": "clientWithStableApi", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "strict": true + } + } + }, + { + "database": { + "id": "dbWithStableApi", + "client": "clientWithStableApi", + "databaseName": "dbWithStableApi" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [] + } + ], + "tests": [ + { + "description": "always attaches $db and implicit lsid to given command and omits default readPreference", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$db": "db", + "lsid": { + "$$exists": true + }, + "$readPreference": { + "$$exists": false + } + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "always gossips the $clusterTime on the sent command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": 
"client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$clusterTime": { + "$$exists": true + } + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "attaches the provided session lsid to given command", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "attaches the provided $readPreference to given command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced", + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "nearest" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "mode": "nearest" + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not attach $readPreference to given command on standalone", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "nearest" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "$$exists": false + }, + 
"$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not attach primary $readPreference to given command", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "primary" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "$$exists": false + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not inherit readConcern specified at the db level", + "operations": [ + { + "name": "runCommand", + "object": "dbWithRC", + "arguments": { + "commandName": "aggregate", + "command": { + "aggregate": "collection", + "pipeline": [], + "cursor": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection", + "readConcern": { + "$$exists": false + }, + "$db": "dbWithRC" + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "does not inherit writeConcern specified at the db level", + "operations": [ + { + "name": "runCommand", + "object": "dbWithWC", + "arguments": { + "commandName": "insert", + "command": { + "insert": "collection", + "documents": [ + { + "foo": "bar" + } + ], + "ordered": true + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection", + "writeConcern": { + "$$exists": false + }, + "$db": "dbWithWC" + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "does not retry retryable errors on given command", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "attaches transaction fields to given command", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded-replicaset", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "session": "session", + "commandName": "insert", + "command": { + "insert": "collection", + "documents": [ + { + "foo": "transaction" + } + ], + "ordered": true + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection", + "documents": [ + { + "foo": "transaction" + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "db" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "attaches apiVersion fields to given command when 
stableApi is configured on the client", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "dbWithStableApi", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "clientWithStableApi", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$db": "dbWithStableApi", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + }, + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/testdata/run-command/runCommand.yml b/testdata/run-command/runCommand.yml new file mode 100644 index 0000000000..eaa12eff23 --- /dev/null +++ b/testdata/run-command/runCommand.yml @@ -0,0 +1,319 @@ +description: runCommand + +schemaVersion: "1.3" + +createEntities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: [commandStartedEvent] + - database: + id: &db db + client: *client + databaseName: *db + - collection: + id: &collection collection + database: *db + collectionName: *collection + - database: + id: &dbWithRC dbWithRC + client: *client + databaseName: *dbWithRC + databaseOptions: + readConcern: { level: 'local' } + - database: + id: &dbWithWC dbWithWC + client: *client + databaseName: *dbWithWC + databaseOptions: + writeConcern: { w: 0 } + - session: + id: &session session + client: *client + # Stable API test + - client: + id: &clientWithStableApi clientWithStableApi + observeEvents: [commandStartedEvent] + serverApi: + version: "1" + strict: true + - database: + id: &dbWithStableApi dbWithStableApi + client: *clientWithStableApi + databaseName: *dbWithStableApi + +initialData: +- collectionName: *collection + databaseName: *db + documents: [] + +tests: + - description: always attaches $db and implicit lsid to given command and omits default readPreference + operations: + - name: runCommand + object: 
*db + arguments: + commandName: ping + command: { ping: 1 } + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + ping: 1 + $db: *db + lsid: { $$exists: true } + $readPreference: { $$exists: false } + commandName: ping + + - description: always gossips the $clusterTime on the sent command + runOnRequirements: + # Only replicasets and sharded clusters have a $clusterTime + - topologies: [ replicaset, sharded ] + operations: + # We have to run one command to obtain a clusterTime to gossip + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + expectResult: { ok: 1 } + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + # Only check the shape of the second ping which should have the $clusterTime received from the first operation + - commandStartedEvent: + command: + ping: 1 + $clusterTime: { $$exists: true } + commandName: ping + + - description: attaches the provided session lsid to given command + operations: + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + session: *session + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + ping: 1 + lsid: { $$sessionLsid: *session } + $db: *db + commandName: ping + + - description: attaches the provided $readPreference to given command + runOnRequirements: + # Exclude single topology, which is most likely a standalone server + - topologies: [ replicaset, sharded-replicaset, load-balanced, sharded ] + operations: + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + readPreference: &readPreference { mode: 'nearest' } + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + ping: 1 + 
$readPreference: *readPreference + $db: *db + commandName: ping + + - description: does not attach $readPreference to given command on standalone + runOnRequirements: + # This test assumes that the single topology contains a standalone server; + # however, it is possible for a single topology to contain a direct + # connection to another server type. + # See: https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.rst#topology-type-single + - topologies: [ single ] + operations: + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + readPreference: { mode: 'nearest' } + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + ping: 1 + $readPreference: { $$exists: false } + $db: *db + commandName: ping + + - description: does not attach primary $readPreference to given command + operations: + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + readPreference: { mode: 'primary' } + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + ping: 1 + $readPreference: { $$exists: false } + $db: *db + commandName: ping + + - description: does not inherit readConcern specified at the db level + operations: + - name: runCommand + object: *dbWithRC + # Test with a command that supports a readConcern option. + # expectResult is intentionally omitted because some drivers + # may automatically convert command responses into cursors. 
+ arguments: + commandName: aggregate + command: { aggregate: *collection, pipeline: [], cursor: {} } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + aggregate: *collection + readConcern: { $$exists: false } + $db: *dbWithRC + commandName: aggregate + + - description: does not inherit writeConcern specified at the db level + operations: + - name: runCommand + object: *dbWithWC + arguments: + commandName: insert + command: + insert: *collection + documents: [ { foo: 'bar' } ] + ordered: true + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + insert: *collection + writeConcern: { $$exists: false } + $db: *dbWithWC + commandName: insert + + - description: does not retry retryable errors on given command + runOnRequirements: + - minServerVersion: "4.2" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ping] + closeConnection: true + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + expectError: + isClientError: true + + - description: attaches transaction fields to given command + runOnRequirements: + - minServerVersion: "4.0" + topologies: [ replicaset ] + - minServerVersion: "4.2" + topologies: [ sharded-replicaset, load-balanced ] + operations: + - name: withTransaction + object: *session + arguments: + callback: + - name: runCommand + object: *db + arguments: + session: *session + commandName: insert + command: + insert: *collection + documents: [ { foo: 'transaction' } ] + ordered: true + expectResult: { $$unsetOrMatches: { insertedId: { $$unsetOrMatches: 1 } } } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + insert: *collection + documents: [ { foo: 'transaction' } ] + ordered: true + lsid: { $$sessionLsid: *session } + txnNumber: 1 + startTransaction: true + 
autocommit: false + # omitted fields + readConcern: { $$exists: false } + writeConcern: { $$exists: false } + commandName: insert + databaseName: *db + - commandStartedEvent: + command: + commitTransaction: 1 + lsid: { $$sessionLsid: *session } + txnNumber: 1 + autocommit: false + # omitted fields + writeConcern: { $$exists: false } + readConcern: { $$exists: false } + commandName: commitTransaction + databaseName: admin + + - description: attaches apiVersion fields to given command when stableApi is configured on the client + runOnRequirements: + - minServerVersion: "5.0" + operations: + - name: runCommand + object: *dbWithStableApi + arguments: + commandName: ping + command: + ping: 1 + expectResult: { ok: 1 } + expectEvents: + - client: *clientWithStableApi + events: + - commandStartedEvent: + command: + ping: 1 + $db: *dbWithStableApi + apiVersion: "1" + apiStrict: true + apiDeprecationErrors: { $$unsetOrMatches: false } + commandName: ping diff --git a/testdata/run-command/runCursorCommand.json b/testdata/run-command/runCursorCommand.json new file mode 100644 index 0000000000..4f1ec8a01a --- /dev/null +++ b/testdata/run-command/runCursorCommand.json @@ -0,0 +1,877 @@ +{ + "description": "runCursorCommand", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "session": { + "id": "session", + "client": "client" + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + 
"x": 55 + } + ] + } + ], + "tests": [ + { + "description": "successfully executes checkMetadataConsistency cursor creating command", + "runOnRequirements": [ + { + "minServerVersion": "7.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "checkMetadataConsistency", + "command": { + "checkMetadataConsistency": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "checkMetadataConsistency": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "checkMetadataConsistency" + } + } + ] + } + ] + }, + { + "description": "errors if the command response is not a cursor", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "creates an implicit session that is reused across getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + 
"$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "accepts an explicit session that is reused across getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "session": "session", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "returns pinned connections to the pool when the cursor is exhausted", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "session": "session", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 2, + "x": 22 + } + 
}, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 3, + "x": 33 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 4, + "x": 44 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 5, + "x": 55 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "returns pinned connections to the pool when the cursor is closed", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 1 + } + }, + { + "name": 
"close", + "object": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 0 + } + } + ] + }, + { + "description": "supports configuring getMore batchSize", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 5, + "command": { + "find": "collection", + "batchSize": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "batchSize": 5, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "supports configuring getMore maxTimeMS", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "maxTimeMS": 300, + "command": { + "find": "collection", + "maxTimeMS": 200, + "batchSize": 1 + } + }, + "ignoreResultAndError": true + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "maxTimeMS": 200, + "batchSize": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "maxTimeMS": 300, + "lsid": { + "$$exists": true + } + }, + 
"commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "supports configuring getMore comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "comment": { + "hello": "getMore" + }, + "command": { + "find": "collection", + "batchSize": 1, + "comment": { + "hello": "find" + } + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 1, + "comment": { + "hello": "find" + }, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "comment": { + "hello": "getMore" + }, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "does not close the cursor when receiving an empty batch", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "cursorType": "tailable", + "commandName": "find", + "batchSize": 2, + "command": 
{ + "find": "cappedCollection", + "tailable": true + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "close", + "object": "cursor" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "cappedCollection" + }, + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "cappedCollection" + }, + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "cappedCollection" + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "cappedCollection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "cappedCollection", + "cursors": { + "$$type": "array" + } + }, + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/testdata/run-command/runCursorCommand.yml b/testdata/run-command/runCursorCommand.yml new file mode 100644 index 0000000000..1f9bf532c3 --- /dev/null +++ b/testdata/run-command/runCursorCommand.yml @@ -0,0 +1,391 @@ +description: runCursorCommand + +schemaVersion: '1.9' + +createEntities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: [commandStartedEvent, connectionReadyEvent, connectionCheckedOutEvent, connectionCheckedInEvent] + - session: + id: &session session + client: *client + - database: + id: &db db + client: *client + databaseName: *db + - collection: + id: &collection collection + database: *db + 
collectionName: *collection + +initialData: + - collectionName: collection + databaseName: *db + documents: &documents + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + - { _id: 5, x: 55 } + +tests: + # This is what this API was invented to do. + - description: successfully executes checkMetadataConsistency cursor creating command + runOnRequirements: + - minServerVersion: '7.0' + topologies: [sharded] + operations: + - name: runCursorCommand + object: *db + arguments: + commandName: checkMetadataConsistency + command: { checkMetadataConsistency: 1 } + expectEvents: + - client: *client + eventType: command + events: + - commandStartedEvent: + command: + checkMetadataConsistency: 1 + $db: *db + lsid: { $$exists: true } + commandName: checkMetadataConsistency + + - description: errors if the command response is not a cursor + operations: + - name: createCommandCursor + object: *db + arguments: + commandName: ping + command: { ping: 1 } + expectError: + isClientError: true + + + # Driver Sessions + - description: creates an implicit session that is reused across getMores + operations: + - name: runCursorCommand + object: *db + arguments: + commandName: find + command: { find: *collection, batchSize: 2 } + expectResult: *documents + - name: assertSameLsidOnLastTwoCommands + object: testRunner + arguments: + client: *client + expectEvents: + - client: *client + eventType: command + events: + - commandStartedEvent: + command: + find: *collection + batchSize: 2 + $db: *db + lsid: { $$exists: true } + commandName: find + - commandStartedEvent: + command: + getMore: { $$type: [int, long] } + collection: *collection + $db: *db + lsid: { $$exists: true } + commandName: getMore + + - description: accepts an explicit session that is reused across getMores + operations: + - name: runCursorCommand + object: *db + arguments: + commandName: find + session: *session + command: { find: *collection, batchSize: 2 } + expectResult: *documents + - 
name: assertSameLsidOnLastTwoCommands + object: testRunner + arguments: + client: *client + expectEvents: + - client: *client + eventType: command + events: + - commandStartedEvent: + command: + find: *collection + batchSize: 2 + $db: *db + lsid: { $$sessionLsid: *session } + commandName: find + - commandStartedEvent: + command: + getMore: { $$type: [int, long] } + collection: *collection + $db: *db + lsid: { $$sessionLsid: *session } + commandName: getMore + + # Load Balancers + - description: returns pinned connections to the pool when the cursor is exhausted + runOnRequirements: + - topologies: [ load-balanced ] + operations: + - name: createCommandCursor + object: *db + arguments: + commandName: find + batchSize: 2 + session: *session + command: { find: *collection, batchSize: 2 } + saveResultAsEntity: &cursor cursor + - name: assertNumberConnectionsCheckedOut + object: testRunner + arguments: + client: *client + connections: 1 + - name: iterateUntilDocumentOrError + object: *cursor + expectResult: { _id: 1, x: 11 } + - name: iterateUntilDocumentOrError + object: *cursor + expectResult: { _id: 2, x: 22 } + - name: iterateUntilDocumentOrError + object: *cursor + expectResult: { _id: 3, x: 33 } + - name: iterateUntilDocumentOrError + object: *cursor + expectResult: { _id: 4, x: 44 } + - name: iterateUntilDocumentOrError + object: *cursor + expectResult: { _id: 5, x: 55 } + - name: assertNumberConnectionsCheckedOut + object: testRunner + arguments: + client: *client + connections: 0 + expectEvents: + - client: *client + eventType: command + events: + - commandStartedEvent: + command: + find: *collection + batchSize: 2 + $db: *db + lsid: { $$sessionLsid: *session } + commandName: find # 2 documents + - commandStartedEvent: + command: + getMore: { $$type: [int, long] } + collection: *collection + $db: *db + lsid: { $$sessionLsid: *session } + commandName: getMore # 2 documents + - commandStartedEvent: + command: + getMore: { $$type: [int, long] } + collection: 
*collection + $db: *db + lsid: { $$sessionLsid: *session } + commandName: getMore # 1 document + # Total documents: 5 + - client: *client + eventType: cmap + events: + - connectionReadyEvent: {} + - connectionCheckedOutEvent: {} + - connectionCheckedInEvent: {} + + - description: returns pinned connections to the pool when the cursor is closed + runOnRequirements: + - topologies: [ load-balanced ] + operations: + - name: createCommandCursor + object: *db + arguments: + commandName: find + command: { find: *collection, batchSize: 2 } + saveResultAsEntity: *cursor + - name: assertNumberConnectionsCheckedOut + object: testRunner + arguments: + client: *client + connections: 1 + - name: close + object: *cursor + - name: assertNumberConnectionsCheckedOut + object: testRunner + arguments: + client: *client + connections: 0 + + # Iterating the Cursor / Executing GetMores + - description: supports configuring getMore batchSize + operations: + - name: runCursorCommand + object: *db + arguments: + commandName: find + batchSize: 5 + command: { find: *collection, batchSize: 1 } + expectResult: *documents + expectEvents: + - client: *client + eventType: command + events: + - commandStartedEvent: + command: + find: *collection + batchSize: 1 + $db: *db + lsid: { $$exists: true } + commandName: find + - commandStartedEvent: + command: + getMore: { $$type: [int, long] } + collection: *collection + batchSize: 5 + $db: *db + lsid: { $$exists: true } + commandName: getMore + + - description: supports configuring getMore maxTimeMS + operations: + - name: runCursorCommand + object: *db + arguments: + commandName: find + maxTimeMS: 300 + command: { find: *collection, maxTimeMS: 200, batchSize: 1 } + ignoreResultAndError: true + expectEvents: + - client: *client + eventType: command + # The getMore should receive an error here because we do not have the right kind of cursor + # So drivers should run a killCursors, but neither the error nor the killCursors command is relevant to this test 
+ ignoreExtraEvents: true + events: + - commandStartedEvent: + command: + find: *collection + maxTimeMS: 200 + batchSize: 1 + $db: *db + lsid: { $$exists: true } + commandName: find + - commandStartedEvent: + command: + getMore: { $$type: [int, long] } + collection: *collection + $db: *db + maxTimeMS: 300 + lsid: { $$exists: true } + commandName: getMore + + - description: supports configuring getMore comment + runOnRequirements: + - minServerVersion: '4.4' + operations: + - name: runCursorCommand + object: *db + arguments: + commandName: find + comment: { hello: 'getMore' } + command: { find: *collection, batchSize: 1, comment: { hello: 'find' } } + expectResult: *documents + expectEvents: + - client: *client + eventType: command + events: + - commandStartedEvent: + command: + find: *collection + batchSize: 1 + comment: { hello: 'find' } + $db: *db + lsid: { $$exists: true } + commandName: find + - commandStartedEvent: + command: + getMore: { $$type: [int, long] } + collection: *collection + comment: { hello: 'getMore' } + $db: *db + lsid: { $$exists: true } + commandName: getMore + + # Tailable cursor + - description: does not close the cursor when receiving an empty batch + runOnRequirements: + - serverless: forbid + operations: + - name: dropCollection + object: *db + arguments: + collection: &cappedCollection cappedCollection + - name: createCollection + object: *db + arguments: + collection: *cappedCollection + capped: true + size: 4096 + max: 3 + saveResultAsEntity: *cappedCollection + - name: insertMany + object: *cappedCollection + arguments: + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - name: createCommandCursor + object: *db + arguments: + cursorType: tailable + commandName: find + batchSize: 2 + command: { find: *cappedCollection, tailable: true } + saveResultAsEntity: &cursor cursor + - name: iterateOnce + object: *cursor + - name: iterateOnce + object: *cursor + - name: iterateOnce + object: *cursor + - name: close + object: *cursor + 
expectEvents: + - client: *client + eventType: command + events: + - commandStartedEvent: + command: + drop: *cappedCollection + commandName: drop + - commandStartedEvent: + command: + create: *cappedCollection + commandName: create + - commandStartedEvent: + command: + insert: *cappedCollection + commandName: insert + - commandStartedEvent: + command: + find: *cappedCollection + $db: *db + lsid: { $$exists: true } + commandName: find + - commandStartedEvent: + command: + getMore: { $$type: [int, long] } + collection: *cappedCollection + $db: *db + lsid: { $$exists: true } + commandName: getMore + - commandStartedEvent: + command: + killCursors: *cappedCollection + cursors: { $$type: array } + commandName: killCursors diff --git a/testdata/unified-test-format/valid-pass/entity-commandCursor.json b/testdata/unified-test-format/valid-pass/entity-commandCursor.json new file mode 100644 index 0000000000..72b74b4a9a --- /dev/null +++ b/testdata/unified-test-format/valid-pass/entity-commandCursor.json @@ -0,0 +1,278 @@ +{ + "description": "entity-commandCursor", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "runCursorCommand creates and exhausts cursor by running getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "command": { + "find": "collection", + "filter": {}, + 
"batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "createCommandCursor creates a cursor and stores it as an entity that can be iterated one document at a time", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2 + } + }, + "saveResultAsEntity": "myRunCommandCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 2, + "x": 22 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 3, + "x": 33 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 4, + "x": 44 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 5, + "x": 55 + } + } + ] + }, + { + "description": 
"createCommandCursor's cursor can be closed and will perform a killCursors operation", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2 + } + }, + "saveResultAsEntity": "myRunCommandCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "close", + "object": "myRunCommandCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "collection", + "cursors": { + "$$type": "array" + } + }, + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/testdata/unified-test-format/valid-pass/entity-commandCursor.yml b/testdata/unified-test-format/valid-pass/entity-commandCursor.yml new file mode 100644 index 0000000000..3becf2095a --- /dev/null +++ b/testdata/unified-test-format/valid-pass/entity-commandCursor.yml @@ -0,0 +1,115 @@ +description: entity-commandCursor +schemaVersion: '1.3' +createEntities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: [commandStartedEvent] + - database: + id: &db db + client: *client + databaseName: *db + - collection: + id: &collection collection + database: *db + collectionName: *collection +initialData: + - collectionName: collection + databaseName: *db + documents: &documents + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + - { _id: 5, x: 55 } +tests: + - description: runCursorCommand creates and exhausts cursor by running getMores + operations: + - name: runCursorCommand + object: *db + arguments: + commandName: find + batchSize: 
2 + command: { find: *collection, filter: {}, batchSize: 2 } + expectResult: *documents + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + find: *collection + filter: {} + batchSize: 2 + $db: *db + lsid: { $$exists: true } + commandName: find + - commandStartedEvent: + command: + getMore: { $$type: [int, long] } + collection: *collection + $db: *db + lsid: { $$exists: true } + commandName: getMore + - commandStartedEvent: + command: + getMore: { $$type: [int, long] } + collection: *collection + $db: *db + lsid: { $$exists: true } + commandName: getMore + + - description: createCommandCursor creates a cursor and stores it as an entity that can be iterated one document at a time + operations: + - name: createCommandCursor + object: *db + arguments: + commandName: find + batchSize: 2 + command: { find: *collection, filter: {}, batchSize: 2 } + saveResultAsEntity: &myRunCommandCursor myRunCommandCursor + - name: iterateUntilDocumentOrError + object: *myRunCommandCursor + expectResult: { _id: 1, x: 11 } + - name: iterateUntilDocumentOrError + object: *myRunCommandCursor + expectResult: { _id: 2, x: 22 } + - name: iterateUntilDocumentOrError + object: *myRunCommandCursor + expectResult: { _id: 3, x: 33 } + - name: iterateUntilDocumentOrError + object: *myRunCommandCursor + expectResult: { _id: 4, x: 44 } + - name: iterateUntilDocumentOrError + object: *myRunCommandCursor + expectResult: { _id: 5, x: 55 } + + - description: createCommandCursor's cursor can be closed and will perform a killCursors operation + operations: + - name: createCommandCursor + object: *db + arguments: + commandName: find + batchSize: 2 + command: { find: *collection, filter: {}, batchSize: 2 } + saveResultAsEntity: myRunCommandCursor + - name: iterateUntilDocumentOrError + object: *myRunCommandCursor + expectResult: { _id: 1, x: 11 } + - name: close + object: *myRunCommandCursor + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + 
find: *collection + filter: {} + batchSize: 2 + $db: *db + lsid: { $$exists: true } + commandName: find + - commandStartedEvent: + command: + killCursors: *collection + cursors: { $$type: array } + commandName: killCursors diff --git a/testdata/unified-test-format/valid-pass/poc-sessions.yml b/testdata/unified-test-format/valid-pass/poc-sessions.yml index cb16657da3..a7efd8a6d6 100644 --- a/testdata/unified-test-format/valid-pass/poc-sessions.yml +++ b/testdata/unified-test-format/valid-pass/poc-sessions.yml @@ -180,7 +180,7 @@ tests: - commandStartedEvent: &insert_attempt command: insert: *collection0Name - documents: + documents: - { _id: 2 } ordered: true lsid: { $$sessionLsid: *session0 } @@ -191,7 +191,7 @@ tests: - commandStartedEvent: command: insert: *collection0Name - documents: + documents: - { _id: 3 } ordered: true lsid: { $$sessionLsid: *session0 } diff --git a/x/mongo/driver/batch_cursor.go b/x/mongo/driver/batch_cursor.go index 96df52b5f4..b211a7b8d7 100644 --- a/x/mongo/driver/batch_cursor.go +++ b/x/mongo/driver/batch_cursor.go @@ -10,11 +10,15 @@ import ( "context" "errors" "fmt" + "io" "strings" + "time" + "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/codecutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -29,7 +33,8 @@ var ErrNoCursor = errors.New("database response does not contain a cursor") type BatchCursor struct { clientSession *session.Client clock *session.ClusterClock - comment bsoncore.Value + comment interface{} + encoderFn codecutil.EncoderFn database string collection string id int64 @@ -147,13 +152,14 @@ func NewCursorResponse(info ResponseInfo) (CursorResponse, error) { // CursorOptions are extra options that are required to construct a BatchCursor. 
type CursorOptions struct { - BatchSize int32 - Comment bsoncore.Value - MaxTimeMS int64 - Limit int32 - CommandMonitor *event.CommandMonitor - Crypt Crypt - ServerAPI *ServerAPIOptions + BatchSize int32 + Comment bsoncore.Value + MaxTimeMS int64 + Limit int32 + CommandMonitor *event.CommandMonitor + Crypt Crypt + ServerAPI *ServerAPIOptions + MarshalValueEncoderFn func(io.Writer) (*bson.Encoder, error) } // NewBatchCursor creates a new BatchCursor from the provided parameters. @@ -177,6 +183,7 @@ func NewBatchCursor(cr CursorResponse, clientSession *session.Client, clock *ses crypt: opts.Crypt, serverAPI: opts.ServerAPI, serverDescription: cr.Desc, + encoderFn: opts.MarshalValueEncoderFn, } if ds != nil { @@ -365,10 +372,17 @@ func (bc *BatchCursor) getMore(ctx context.Context) { if bc.maxTimeMS > 0 { dst = bsoncore.AppendInt64Element(dst, "maxTimeMS", bc.maxTimeMS) } + + comment, err := codecutil.MarshalValue(bc.comment, bc.encoderFn) + if err != nil { + return nil, fmt.Errorf("error marshaling comment as a BSON value: %w", err) + } + // The getMore command does not support commenting pre-4.4. - if bc.comment.Type != bsontype.Type(0) && bc.serverDescription.WireVersion.Max >= 9 { - dst = bsoncore.AppendValueElement(dst, "comment", bc.comment) + if comment.Type != bsontype.Type(0) && bc.serverDescription.WireVersion.Max >= 9 { + dst = bsoncore.AppendValueElement(dst, "comment", comment) } + return dst, nil }, Database: bc.database, @@ -444,11 +458,26 @@ func (bc *BatchCursor) PostBatchResumeToken() bsoncore.Document { return bc.postBatchResumeToken } -// SetBatchSize sets the batchSize for future getMores. +// SetBatchSize sets the batchSize for future getMore operations. func (bc *BatchCursor) SetBatchSize(size int32) { bc.batchSize = size } +// SetMaxTime will set the maximum amount of time the server will allow the +// operations to execute. The server will error if this field is set but the +// cursor is not configured with awaitData=true. 
+// +// The time.Duration value passed by this setter will be converted and rounded +// down to the nearest millisecond. +func (bc *BatchCursor) SetMaxTime(dur time.Duration) { + bc.maxTimeMS = int64(dur / time.Millisecond) +} + +// SetComment sets the comment for future getMore operations. +func (bc *BatchCursor) SetComment(comment interface{}) { + bc.comment = comment +} + func (bc *BatchCursor) getOperationDeployment() Deployment { if bc.connection != nil { return &loadBalancedCursorDeployment{ diff --git a/x/mongo/driver/batch_cursor_test.go b/x/mongo/driver/batch_cursor_test.go index 7c9ad38c7b..c57434cb83 100644 --- a/x/mongo/driver/batch_cursor_test.go +++ b/x/mongo/driver/batch_cursor_test.go @@ -8,6 +8,7 @@ package driver import ( "testing" + "time" "go.mongodb.org/mongo-driver/internal/assert" ) @@ -90,3 +91,43 @@ func TestBatchCursor(t *testing.T) { } }) } + +func TestBatchCursorSetMaxTime(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + dur time.Duration + want int64 + }{ + { + name: "empty", + dur: 0, + want: 0, + }, + { + name: "partial milliseconds are truncated", + dur: 10_900 * time.Microsecond, + want: 10, + }, + { + name: "millisecond input", + dur: 10 * time.Millisecond, + want: 10, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + bc := BatchCursor{} + bc.SetMaxTime(test.dur) + + got := bc.maxTimeMS + assert.Equal(t, test.want, got, "expected and actual maxTimeMS are different") + }) + } +} diff --git a/x/mongo/driver/list_collections_batch_cursor.go b/x/mongo/driver/list_collections_batch_cursor.go index 3917218b76..411c203c7a 100644 --- a/x/mongo/driver/list_collections_batch_cursor.go +++ b/x/mongo/driver/list_collections_batch_cursor.go @@ -11,6 +11,7 @@ import ( "errors" "io" "strings" + "time" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) @@ -26,6 +27,8 @@ type ListCollectionsBatchCursor struct { // NewListCollectionsBatchCursor creates a new 
non-legacy ListCollectionsCursor. func NewListCollectionsBatchCursor(bc *BatchCursor) (*ListCollectionsBatchCursor, error) { + // TODO(GODRIVER-2861): Merge ListCollectionsBatchCursor into + // BatchCursor. if bc == nil { return nil, errors.New("batch cursor must not be nil") } @@ -128,7 +131,26 @@ func (*ListCollectionsBatchCursor) projectNameElement(rawDoc bsoncore.Document) return filteredDoc, nil } -// SetBatchSize sets the batchSize for future getMores. +// SetBatchSize sets the number of documents to fetch from the database with +// each iteration of the cursor's "Next" method. Note that some operations set +// an initial cursor batch size, so this setting only affects subsequent +// document batches fetched from the database. func (lcbc *ListCollectionsBatchCursor) SetBatchSize(size int32) { lcbc.bc.SetBatchSize(size) } + +// SetMaxTime will set the maximum amount of time the server will allow the +// operations to execute. The server will error if this field is set but the +// cursor is not configured with awaitData=true. +// +// The time.Duration value passed by this setter will be converted and rounded +// down to the nearest millisecond. +func (lcbc *ListCollectionsBatchCursor) SetMaxTime(dur time.Duration) { + lcbc.bc.SetMaxTime(dur) +} + +// SetComment will set a user-configurable comment that can be used to identify +// the operation in server logs. +func (lcbc *ListCollectionsBatchCursor) SetComment(comment interface{}) { + lcbc.bc.SetComment(comment) +} diff --git a/x/mongo/driver/operation.go b/x/mongo/driver/operation.go index beb8651cff..4f998f7cc7 100644 --- a/x/mongo/driver/operation.go +++ b/x/mongo/driver/operation.go @@ -1514,6 +1514,9 @@ func (op Operation) getReadPrefBasedOnTransaction() (*readpref.ReadPref, error) return op.ReadPreference, nil } +// createReadPref will attempt to create a document with the "readPreference" +// object and various related fields such as "mode", "tags", and +// "maxStalenessSeconds". 
func (op Operation) createReadPref(desc description.SelectedServer, isOpQuery bool) (bsoncore.Document, error) { // TODO(GODRIVER-2231): Instead of checking if isOutputAggregate and desc.Server.WireVersion.Max < 13, somehow check // TODO if supplied readPreference was "overwritten" with primary in description.selectForReplicaSet. @@ -1553,7 +1556,14 @@ func (op Operation) createReadPref(desc description.SelectedServer, isOpQuery bo doc, _ = bsoncore.AppendDocumentEnd(doc, idx) return doc, nil } - doc = bsoncore.AppendStringElement(doc, "mode", "primary") + + // OP_MSG requires never sending read preference "primary" + // except for topology "single". + // + // It is important to note that although the Go Driver does not + // support legacy opcodes, OP_QUERY has different rules for + // adding read preference to commands. + return nil, nil case readpref.PrimaryPreferredMode: doc = bsoncore.AppendStringElement(doc, "mode", "primaryPreferred") case readpref.SecondaryPreferredMode: diff --git a/x/mongo/driver/operation_test.go b/x/mongo/driver/operation_test.go index 49bd46d8fc..323d597d84 100644 --- a/x/mongo/driver/operation_test.go +++ b/x/mongo/driver/operation_test.go @@ -414,7 +414,6 @@ func TestOperation(t *testing.T) { ) rpPrimaryPreferred := bsoncore.BuildDocumentFromElements(nil, bsoncore.AppendStringElement(nil, "mode", "primaryPreferred")) - rpPrimary := bsoncore.BuildDocumentFromElements(nil, bsoncore.AppendStringElement(nil, "mode", "primary")) rpSecondaryPreferred := bsoncore.BuildDocumentFromElements(nil, bsoncore.AppendStringElement(nil, "mode", "secondaryPreferred")) rpSecondary := bsoncore.BuildDocumentFromElements(nil, bsoncore.AppendStringElement(nil, "mode", "secondary")) rpNearest := bsoncore.BuildDocumentFromElements(nil, bsoncore.AppendStringElement(nil, "mode", "nearest")) @@ -431,7 +430,7 @@ func TestOperation(t *testing.T) { {"nil/single/secondary", nil, description.RSSecondary, description.Single, false, rpPrimaryPreferred}, 
{"primary/mongos", readpref.Primary(), description.Mongos, description.Sharded, false, nil}, {"primary/single", readpref.Primary(), description.RSPrimary, description.Single, false, rpPrimaryPreferred}, - {"primary/primary", readpref.Primary(), description.RSPrimary, description.ReplicaSet, false, rpPrimary}, + {"primary/primary", readpref.Primary(), description.RSPrimary, description.ReplicaSet, false, nil}, {"primaryPreferred", readpref.PrimaryPreferred(), description.RSSecondary, description.ReplicaSet, false, rpPrimaryPreferred}, {"secondaryPreferred/mongos/opquery", readpref.SecondaryPreferred(), description.Mongos, description.Sharded, true, nil}, {"secondaryPreferred", readpref.SecondaryPreferred(), description.RSSecondary, description.ReplicaSet, false, rpSecondaryPreferred}, From 6499fe2f55b8c0b281e738a8310b76d11d2e5499 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 18 Jul 2023 11:44:10 -0500 Subject: [PATCH 08/15] GODRIVER-2913 Add global CODEOWNERS File (#1324) --- docs/CODEOWNERS | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 docs/CODEOWNERS diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS new file mode 100644 index 0000000000..ecf6ff135b --- /dev/null +++ b/docs/CODEOWNERS @@ -0,0 +1,2 @@ +# Global owner for repo +* @mongodb/dbx-go From cf2b7eb5cc0f09cd1e723a7b4faf32e377582926 Mon Sep 17 00:00:00 2001 From: Matt Dale <9760375+matthewdale@users.noreply.github.com> Date: Tue, 18 Jul 2023 19:09:45 -0700 Subject: [PATCH 09/15] Always add relevant errors to pool events. 
(#1322) --- x/mongo/driver/topology/pool.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/x/mongo/driver/topology/pool.go b/x/mongo/driver/topology/pool.go index da40fd6a80..5d2369352e 100644 --- a/x/mongo/driver/topology/pool.go +++ b/x/mongo/driver/topology/pool.go @@ -500,6 +500,7 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { Type: event.GetFailed, Address: p.address.String(), Reason: event.ReasonConnectionErrored, + Error: err, }) } return nil, err @@ -542,6 +543,7 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { Type: event.GetFailed, Address: p.address.String(), Reason: event.ReasonConnectionErrored, + Error: w.err, }) } return nil, w.err @@ -589,6 +591,7 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { Type: event.GetFailed, Address: p.address.String(), Reason: event.ReasonConnectionErrored, + Error: w.err, }) } @@ -625,6 +628,7 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { Type: event.GetFailed, Address: p.address.String(), Reason: event.ReasonTimedOut, + Error: ctx.Err(), }) } @@ -878,6 +882,7 @@ func (p *pool) clear(err error, serviceID *primitive.ObjectID) { Type: event.PoolCleared, Address: p.address.String(), ServiceID: serviceID, + Error: err, }) } } From bbeed7ad95557aed4b2bea61baabffc2bd95f814 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 19 Jul 2023 09:34:22 -0500 Subject: [PATCH 10/15] GODRIVER-2900 Fix handling of EC2 Instance Profile in EG Tests (#1328) --- .evergreen/config.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 9cf59667a0..8513e5ba22 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -715,6 +715,17 @@ functions: # balancer is not running. 
DRIVERS_TOOLS=${DRIVERS_TOOLS} MONGODB_URI=${MONGODB_URI} bash ${DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh stop || echo "Ignoring load balancer stop error" + teardown-aws: + - command: shell.exec + params: + shell: "bash" + script: | + cd "${DRIVERS_TOOLS}/.evergreen/auth_aws" + if [ -f "./aws_e2e_setup.json" ]; then + . ./activate-authawsvenv.sh + python ./lib/aws_assign_instance_profile.py + fi + add-aws-auth-variables-to-file: - command: shell.exec type: test @@ -1215,6 +1226,7 @@ post: - "src/go.mongodb.org/mongo-driver/*.suite" - func: upload-mo-artifacts - func: stop-load-balancer + - func: teardown-aws - func: cleanup tasks: From acf1235ec3c3f51b63e724c6ac7c5adfe3077811 Mon Sep 17 00:00:00 2001 From: Preston Vasquez Date: Wed, 19 Jul 2023 09:11:01 -0600 Subject: [PATCH 11/15] GODRIVER-2585 Add log messages to SDAM (#1305) --- internal/logger/component.go | 153 +++-- internal/logger/component_test.go | 226 +++++++ internal/logger/logger.go | 7 + mongo/integration/unified/client_entity.go | 77 ++- mongo/integration/unified/context.go | 17 +- mongo/integration/unified/entity.go | 14 + mongo/integration/unified/event.go | 42 +- .../unified/logger_verification.go | 29 +- .../unified/logger_verification_test.go | 84 +++ mongo/integration/unified/schema_version.go | 2 +- .../unified/testrunner_operation.go | 4 + .../unified/unified_spec_runner.go | 26 +- .../integration/unified/unified_spec_test.go | 1 + .../unified/logging-loadbalanced.json | 150 +++++ .../unified/logging-loadbalanced.yml | 74 +++ .../unified/logging-replicaset.json | 606 ++++++++++++++++++ .../unified/logging-replicaset.yml | 289 +++++++++ .../unified/logging-sharded.json | 492 ++++++++++++++ .../unified/logging-sharded.yml | 248 +++++++ .../unified/logging-standalone.json | 517 +++++++++++++++ .../unified/logging-standalone.yml | 258 ++++++++ ...gesForClient-ignoreExtraMessages-type.json | 24 + ...agesForClient-ignoreExtraMessages-type.yml | 15 + 
...essagesForClient-ignoreMessages-items.json | 26 + ...MessagesForClient-ignoreMessages-items.yml | 15 + ...MessagesForClient-ignoreMessages-type.json | 24 + ...gMessagesForClient-ignoreMessages-type.yml | 15 + x/mongo/driver/topology/server.go | 59 +- x/mongo/driver/topology/topology.go | 42 ++ x/mongo/driver/topology/topology_options.go | 29 +- 30 files changed, 3464 insertions(+), 101 deletions(-) create mode 100644 internal/logger/component_test.go create mode 100644 mongo/integration/unified/logger_verification_test.go create mode 100644 testdata/server-discovery-and-monitoring/unified/logging-loadbalanced.json create mode 100644 testdata/server-discovery-and-monitoring/unified/logging-loadbalanced.yml create mode 100644 testdata/server-discovery-and-monitoring/unified/logging-replicaset.json create mode 100644 testdata/server-discovery-and-monitoring/unified/logging-replicaset.yml create mode 100644 testdata/server-discovery-and-monitoring/unified/logging-sharded.json create mode 100644 testdata/server-discovery-and-monitoring/unified/logging-sharded.yml create mode 100644 testdata/server-discovery-and-monitoring/unified/logging-standalone.json create mode 100644 testdata/server-discovery-and-monitoring/unified/logging-standalone.yml create mode 100644 testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreExtraMessages-type.json create mode 100644 testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreExtraMessages-type.yml create mode 100644 testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-items.json create mode 100644 testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-items.yml create mode 100644 testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-type.json create mode 100644 testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-type.yml diff --git a/internal/logger/component.go 
b/internal/logger/component.go index da9c43de43..fdd6d42405 100644 --- a/internal/logger/component.go +++ b/internal/logger/component.go @@ -14,44 +14,56 @@ import ( ) const ( - CommandFailed = "Command failed" - CommandStarted = "Command started" - CommandSucceeded = "Command succeeded" - ConnectionPoolCreated = "Connection pool created" - ConnectionPoolReady = "Connection pool ready" - ConnectionPoolCleared = "Connection pool cleared" - ConnectionPoolClosed = "Connection pool closed" - ConnectionCreated = "Connection created" - ConnectionReady = "Connection ready" - ConnectionClosed = "Connection closed" - ConnectionCheckoutStarted = "Connection checkout started" - ConnectionCheckoutFailed = "Connection checkout failed" - ConnectionCheckedOut = "Connection checked out" - ConnectionCheckedIn = "Connection checked in" + CommandFailed = "Command failed" + CommandStarted = "Command started" + CommandSucceeded = "Command succeeded" + ConnectionPoolCreated = "Connection pool created" + ConnectionPoolReady = "Connection pool ready" + ConnectionPoolCleared = "Connection pool cleared" + ConnectionPoolClosed = "Connection pool closed" + ConnectionCreated = "Connection created" + ConnectionReady = "Connection ready" + ConnectionClosed = "Connection closed" + ConnectionCheckoutStarted = "Connection checkout started" + ConnectionCheckoutFailed = "Connection checkout failed" + ConnectionCheckedOut = "Connection checked out" + ConnectionCheckedIn = "Connection checked in" + TopologyClosed = "Stopped topology monitoring" + TopologyDescriptionChanged = "Topology description changed" + TopologyOpening = "Starting topology monitoring" + TopologyServerClosed = "Stopped server monitoring" + TopologyServerHeartbeatFailed = "Server heartbeat failed" + TopologyServerHeartbeatStarted = "Server heartbeat started" + TopologyServerHeartbeatSucceeded = "Server heartbeat succeeded" + TopologyServerOpening = "Starting server monitoring" ) const ( - KeyCommand = "command" - KeyCommandName = 
"commandName" - KeyDatabaseName = "databaseName" - KeyDriverConnectionID = "driverConnectionId" - KeyDurationMS = "durationMS" - KeyError = "error" - KeyFailure = "failure" - KeyMaxConnecting = "maxConnecting" - KeyMaxIdleTimeMS = "maxIdleTimeMS" - KeyMaxPoolSize = "maxPoolSize" - KeyMessage = "message" - KeyMinPoolSize = "minPoolSize" - KeyOperationID = "operationId" - KeyReason = "reason" - KeyReply = "reply" - KeyRequestID = "requestId" - KeyServerConnectionID = "serverConnectionId" - KeyServerHost = "serverHost" - KeyServerPort = "serverPort" - KeyServiceID = "serviceId" - KeyTimestamp = "timestamp" + KeyAwaited = "awaited" + KeyCommand = "command" + KeyCommandName = "commandName" + KeyDatabaseName = "databaseName" + KeyDriverConnectionID = "driverConnectionId" + KeyDurationMS = "durationMS" + KeyError = "error" + KeyFailure = "failure" + KeyMaxConnecting = "maxConnecting" + KeyMaxIdleTimeMS = "maxIdleTimeMS" + KeyMaxPoolSize = "maxPoolSize" + KeyMessage = "message" + KeyMinPoolSize = "minPoolSize" + KeyNewDescription = "newDescription" + KeyOperationID = "operationId" + KeyPreviousDescription = "previousDescription" + KeyReason = "reason" + KeyReply = "reply" + KeyRequestID = "requestId" + KeyServerConnectionID = "serverConnectionId" + KeyServerHost = "serverHost" + KeyServerPort = "serverPort" + KeyServiceID = "serviceId" + KeyTimestamp = "timestamp" + KeyTopologyID = "topologyId" ) type KeyValues []interface{} @@ -137,7 +149,7 @@ type Command struct { // SerializeCommand takes a command and a variable number of key-value pairs and // returns a slice of interface{} that can be passed to the logger for // structured logging. -func SerializeCommand(cmd Command, extraKeysAndValues ...interface{}) []interface{} { +func SerializeCommand(cmd Command, extraKeysAndValues ...interface{}) KeyValues { // Initialize the boilerplate keys and values. 
keysAndValues := KeyValues{ KeyCommandName, cmd.Name, @@ -153,7 +165,7 @@ func SerializeCommand(cmd Command, extraKeysAndValues ...interface{}) []interfac keysAndValues.Add(extraKeysAndValues[i].(string), extraKeysAndValues[i+1]) } - port, err := strconv.ParseInt(cmd.ServerPort, 0, 32) + port, err := strconv.ParseInt(cmd.ServerPort, 10, 32) if err == nil { keysAndValues.Add(KeyServerPort, port) } @@ -178,9 +190,9 @@ type Connection struct { ServerPort string // Port for the server } -// SerializeConnection serializes a ConnectionMessage into a slice of keys -// and values that can be passed to a logger. -func SerializeConnection(conn Connection, extraKeysAndValues ...interface{}) []interface{} { +// SerializeConnection serializes a Connection message into a slice of keys and +// values that can be passed to a logger. +func SerializeConnection(conn Connection, extraKeysAndValues ...interface{}) KeyValues { // Initialize the boilerplate keys and values. keysAndValues := KeyValues{ KeyMessage, conn.Message, @@ -192,10 +204,69 @@ func SerializeConnection(conn Connection, extraKeysAndValues ...interface{}) []i keysAndValues.Add(extraKeysAndValues[i].(string), extraKeysAndValues[i+1]) } - port, err := strconv.ParseInt(conn.ServerPort, 0, 32) + port, err := strconv.ParseInt(conn.ServerPort, 10, 32) if err == nil { keysAndValues.Add(KeyServerPort, port) } return keysAndValues } + +// Server contains data that all server messages MAY contain. +type Server struct { + DriverConnectionID uint64 // Driver's ID for the connection + TopologyID primitive.ObjectID // Driver's unique ID for this topology + Message string // Message associated with the topology + ServerConnectionID *int64 // Server's ID for the connection + ServerHost string // Hostname or IP address for the server + ServerPort string // Port for the server +} + +// SerializeServer serializes a Server message into a slice of keys and +// values that can be passed to a logger. 
+func SerializeServer(srv Server, extraKV ...interface{}) KeyValues { + // Initialize the boilerplate keys and values. + keysAndValues := KeyValues{ + KeyDriverConnectionID, srv.DriverConnectionID, + KeyMessage, srv.Message, + KeyServerHost, srv.ServerHost, + KeyTopologyID, srv.TopologyID.Hex(), + } + + if connID := srv.ServerConnectionID; connID != nil { + keysAndValues.Add(KeyServerConnectionID, *connID) + } + + port, err := strconv.ParseInt(srv.ServerPort, 10, 32) + if err == nil { + keysAndValues.Add(KeyServerPort, port) + } + + // Add the optional keys and values. + for i := 0; i < len(extraKV); i += 2 { + keysAndValues.Add(extraKV[i].(string), extraKV[i+1]) + } + + return keysAndValues +} + +// Topology contains data that all topology messages MAY contain. +type Topology struct { + ID primitive.ObjectID // Driver's unique ID for this topology + Message string // Message associated with the topology +} + +// SerializeTopology serializes a Topology message into a slice of keys and +// values that can be passed to a logger. +func SerializeTopology(topo Topology, extraKV ...interface{}) KeyValues { + keysAndValues := KeyValues{ + KeyTopologyID, topo.ID.Hex(), + } + + // Add the optional keys and values. + for i := 0; i < len(extraKV); i += 2 { + keysAndValues.Add(extraKV[i].(string), extraKV[i+1]) + } + + return keysAndValues +} diff --git a/internal/logger/component_test.go b/internal/logger/component_test.go new file mode 100644 index 0000000000..e7a1942c3d --- /dev/null +++ b/internal/logger/component_test.go @@ -0,0 +1,226 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package logger + +import ( + "testing" + + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/internal/assert" +) + +func verifySerialization(t *testing.T, got, want KeyValues) { + t.Helper() + + for i := 0; i < len(got); i += 2 { + assert.Equal(t, want[i], got[i], "key position mismatch") + assert.Equal(t, want[i+1], got[i+1], "value position mismatch for %q", want[i]) + } +} + +func TestSerializeCommand(t *testing.T) { + t.Parallel() + + serverConnectionID := int64(100) + serviceID := primitive.NewObjectID() + + tests := []struct { + name string + cmd Command + extraKeysAndValues []interface{} + want KeyValues + }{ + { + name: "empty", + want: KeyValues{ + KeyCommandName, "", + KeyDriverConnectionID, uint64(0), + KeyMessage, "", + KeyOperationID, int32(0), + KeyRequestID, int64(0), + KeyServerHost, "", + }, + }, + { + name: "complete Command object", + cmd: Command{ + DriverConnectionID: 1, + Name: "foo", + Message: "bar", + OperationID: 2, + RequestID: 3, + ServerHost: "localhost", + ServerPort: "27017", + ServerConnectionID: &serverConnectionID, + ServiceID: &serviceID, + }, + want: KeyValues{ + KeyCommandName, "foo", + KeyDriverConnectionID, uint64(1), + KeyMessage, "bar", + KeyOperationID, int32(2), + KeyRequestID, int64(3), + KeyServerHost, "localhost", + KeyServerPort, int64(27017), + KeyServerConnectionID, serverConnectionID, + KeyServiceID, serviceID.Hex(), + }, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + got := SerializeCommand(test.cmd, test.extraKeysAndValues...) 
+ verifySerialization(t, got, test.want) + }) + } +} + +func TestSerializeConnection(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + conn Connection + extraKeysAndValues []interface{} + want KeyValues + }{ + { + name: "empty", + want: KeyValues{ + KeyMessage, "", + KeyServerHost, "", + }, + }, + { + name: "complete Connection object", + conn: Connection{ + Message: "foo", + ServerHost: "localhost", + ServerPort: "27017", + }, + want: KeyValues{ + "message", "foo", + "serverHost", "localhost", + "serverPort", int64(27017), + }, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + got := SerializeConnection(test.conn, test.extraKeysAndValues...) + verifySerialization(t, got, test.want) + }) + } +} + +func TestSerializeServer(t *testing.T) { + t.Parallel() + + topologyID := primitive.NewObjectID() + serverConnectionID := int64(100) + + tests := []struct { + name string + srv Server + extraKeysAndValues []interface{} + want KeyValues + }{ + { + name: "empty", + want: KeyValues{ + KeyDriverConnectionID, uint64(0), + KeyMessage, "", + KeyServerHost, "", + KeyTopologyID, primitive.ObjectID{}.Hex(), + }, + }, + { + name: "complete Server object", + srv: Server{ + DriverConnectionID: 1, + TopologyID: topologyID, + Message: "foo", + ServerConnectionID: &serverConnectionID, + ServerHost: "localhost", + ServerPort: "27017", + }, + want: KeyValues{ + KeyDriverConnectionID, uint64(1), + KeyMessage, "foo", + KeyServerHost, "localhost", + KeyTopologyID, topologyID.Hex(), + KeyServerConnectionID, serverConnectionID, + KeyServerPort, int64(27017), + }, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + got := SerializeServer(test.srv, test.extraKeysAndValues...) 
+ verifySerialization(t, got, test.want) + }) + } + +} + +func TestSerializeTopology(t *testing.T) { + t.Parallel() + + topologyID := primitive.NewObjectID() + + tests := []struct { + name string + topo Topology + extraKeysAndValues []interface{} + want KeyValues + }{ + { + name: "empty", + want: KeyValues{ + KeyTopologyID, primitive.ObjectID{}.Hex(), + }, + }, + { + name: "complete Server object", + topo: Topology{ + ID: topologyID, + Message: "foo", + }, + want: KeyValues{ + KeyTopologyID, topologyID.Hex(), + KeyMessage, "foo", + }, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + got := SerializeTopology(test.topo, test.extraKeysAndValues...) + verifySerialization(t, got, test.want) + }) + } + +} diff --git a/internal/logger/logger.go b/internal/logger/logger.go index c4053ea3df..137fb943ad 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -83,6 +83,13 @@ func (logger *Logger) LevelComponentEnabled(level Level, component Component) bo // Print will synchronously print the given message to the configured LogSink. // If the LogSink is nil, then this method will do nothing. Future work could be done to make // this method asynchronous, see buffer management in libraries such as log4j. +// +// It's worth noting that many structured logs defined by DBX-wide +// specifications include a "message" field, which is often shared with the +// message arguments passed to this print function. The "Info" method used by +// this function is implemented based on the go-logr/logr LogSink interface, +// which is why "Print" has a message parameter. Any duplication in code is +// intentional to adhere to the logr pattern. func (logger *Logger) Print(level Level, component Component, msg string, keysAndValues ...interface{}) { // If the level is not enabled for the component, then // skip the message. 
diff --git a/mongo/integration/unified/client_entity.go b/mongo/integration/unified/client_entity.go index e7d37fc66c..56301c9bb8 100644 --- a/mongo/integration/unified/client_entity.go +++ b/mongo/integration/unified/client_entity.go @@ -44,15 +44,19 @@ type clientEntity struct { *mongo.Client disconnected bool - recordEvents atomic.Value - started []*event.CommandStartedEvent - succeeded []*event.CommandSucceededEvent - failed []*event.CommandFailedEvent - pooled []*event.PoolEvent - serverDescriptionChanged []*event.ServerDescriptionChangedEvent - ignoredCommands map[string]struct{} - observeSensitiveCommands *bool - numConnsCheckedOut int32 + recordEvents atomic.Value + started []*event.CommandStartedEvent + succeeded []*event.CommandSucceededEvent + failed []*event.CommandFailedEvent + pooled []*event.PoolEvent + serverDescriptionChanged []*event.ServerDescriptionChangedEvent + serverHeartbeatFailedEvent []*event.ServerHeartbeatFailedEvent + serverHeartbeatStartedEvent []*event.ServerHeartbeatStartedEvent + serverHeartbeatSucceeded []*event.ServerHeartbeatSucceededEvent + topologyDescriptionChanged []*event.TopologyDescriptionChangedEvent + ignoredCommands map[string]struct{} + observeSensitiveCommands *bool + numConnsCheckedOut int32 // These should not be changed after the clientEntity is initialized observedEvents map[monitoringEventType]struct{} @@ -117,7 +121,6 @@ func newClientEntity(ctx context.Context, em *EntityMap, entityOptions *entityOp SetComponentLevel(options.LogComponentConnection, wrap(olm.Connection)). SetMaxDocumentLength(defaultMaxDocumentLen). SetSink(clientLogger) - } // UseMultipleMongoses requires validation when connecting to a sharded cluster. 
Options changes and validation are @@ -144,7 +147,11 @@ func newClientEntity(ctx context.Context, em *EntityMap, entityOptions *entityOp } serverMonitor := &event.ServerMonitor{ - ServerDescriptionChanged: entity.processServerDescriptionChangedEvent, + ServerDescriptionChanged: entity.processServerDescriptionChangedEvent, + ServerHeartbeatFailed: entity.processServerHeartbeatFailedEvent, + ServerHeartbeatStarted: entity.processServerHeartbeatStartedEvent, + ServerHeartbeatSucceeded: entity.processServerHeartbeatSucceededEvent, + TopologyDescriptionChanged: entity.processTopologyDescriptionChangedEvent, } clientOpts.SetMonitor(commandMonitor).SetPoolMonitor(poolMonitor).SetServerMonitor(serverMonitor) @@ -458,6 +465,54 @@ func (c *clientEntity) processServerDescriptionChangedEvent(evt *event.ServerDes c.addEventsCount(serverDescriptionChangedEvent) } +func (c *clientEntity) processServerHeartbeatFailedEvent(evt *event.ServerHeartbeatFailedEvent) { + if !c.getRecordEvents() { + return + } + + if _, ok := c.observedEvents[serverHeartbeatFailedEvent]; ok { + c.serverHeartbeatFailedEvent = append(c.serverHeartbeatFailedEvent, evt) + } + + c.addEventsCount(serverHeartbeatFailedEvent) +} + +func (c *clientEntity) processServerHeartbeatStartedEvent(evt *event.ServerHeartbeatStartedEvent) { + if !c.getRecordEvents() { + return + } + + if _, ok := c.observedEvents[serverHeartbeatStartedEvent]; ok { + c.serverHeartbeatStartedEvent = append(c.serverHeartbeatStartedEvent, evt) + } + + c.addEventsCount(serverHeartbeatStartedEvent) +} + +func (c *clientEntity) processServerHeartbeatSucceededEvent(evt *event.ServerHeartbeatSucceededEvent) { + if !c.getRecordEvents() { + return + } + + if _, ok := c.observedEvents[serverHeartbeatSucceededEvent]; ok { + c.serverHeartbeatSucceeded = append(c.serverHeartbeatSucceeded, evt) + } + + c.addEventsCount(serverHeartbeatSucceededEvent) +} + +func (c *clientEntity) processTopologyDescriptionChangedEvent(evt 
*event.TopologyDescriptionChangedEvent) { + if !c.getRecordEvents() { + return + } + + if _, ok := c.observedEvents[topologyDescriptionChangedEvent]; ok { + c.topologyDescriptionChanged = append(c.topologyDescriptionChanged, evt) + } + + c.addEventsCount(topologyDescriptionChangedEvent) +} + func (c *clientEntity) setRecordEvents(record bool) { c.recordEvents.Store(record) } diff --git a/mongo/integration/unified/context.go b/mongo/integration/unified/context.go index b73b7fd325..d97abaf023 100644 --- a/mongo/integration/unified/context.go +++ b/mongo/integration/unified/context.go @@ -17,6 +17,11 @@ import ( type ctxKey string const ( + // operationalFailPoint indicates that the test case contains an + // operation that sets a failpoint. This information is useful in + // determining if a client entity (file or operational level) should + // specify a reduced value for heartbeatFrequencyMS. + operationalFailPointKey ctxKey = "operational-fail-point" // entitiesKey is used to store an entityMap instance in a Context. entitiesKey ctxKey = "test-entities" // failPointsKey is used to store a map from a fail point name to the Client instance used to configure it. @@ -29,7 +34,13 @@ const ( // newTestContext creates a new Context derived from ctx with values initialized to store the state required for test // execution. 
-func newTestContext(ctx context.Context, entityMap *EntityMap, expectedLogMessageCount int) context.Context { +func newTestContext( + ctx context.Context, + entityMap *EntityMap, + expectedLogMessageCount int, + hasOperationalFailPoint bool, +) context.Context { + ctx = context.WithValue(ctx, operationalFailPointKey, hasOperationalFailPoint) ctx = context.WithValue(ctx, entitiesKey, entityMap) ctx = context.WithValue(ctx, failPointsKey, make(map[string]*mongo.Client)) ctx = context.WithValue(ctx, targetedFailPointsKey, make(map[string]string)) @@ -61,6 +72,10 @@ func failPoints(ctx context.Context) map[string]*mongo.Client { return ctx.Value(failPointsKey).(map[string]*mongo.Client) } +func hasOperationalFailpoint(ctx context.Context) bool { + return ctx.Value(operationalFailPointKey).(bool) +} + func targetedFailPoints(ctx context.Context) map[string]string { return ctx.Value(targetedFailPointsKey).(map[string]string) } diff --git a/mongo/integration/unified/entity.go b/mongo/integration/unified/entity.go index 7c916f0e01..06de9b6e78 100644 --- a/mongo/integration/unified/entity.go +++ b/mongo/integration/unified/entity.go @@ -15,6 +15,7 @@ import ( "os" "sync" "sync/atomic" + "time" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" @@ -84,6 +85,19 @@ type entityOptions struct { ClientEncryptionOpts *clientEncryptionOpts `bson:"clientEncryptionOpts"` } +func (eo *entityOptions) setHeartbeatFrequencyMS(freq time.Duration) { + if eo.URIOptions == nil { + eo.URIOptions = make(bson.M) + } + + if _, ok := eo.URIOptions["heartbeatFrequencyMS"]; !ok { + // The UST values for heartbeatFrequencyMS are given as int32, + // so we need to cast the frequency as int32 before setting it + // on the URIOptions map. + eo.URIOptions["heartbeatFrequencyMS"] = int32(freq.Milliseconds()) + } +} + // newCollectionEntityOptions constructs an entity options object for a // collection. 
func newCollectionEntityOptions(id string, databaseID string, collectionName string, diff --git a/mongo/integration/unified/event.go b/mongo/integration/unified/event.go index ad9a0610d3..b3c0ebf15c 100644 --- a/mongo/integration/unified/event.go +++ b/mongo/integration/unified/event.go @@ -15,21 +15,25 @@ import ( type monitoringEventType string const ( - commandStartedEvent monitoringEventType = "CommandStartedEvent" - commandSucceededEvent monitoringEventType = "CommandSucceededEvent" - commandFailedEvent monitoringEventType = "CommandFailedEvent" - poolCreatedEvent monitoringEventType = "PoolCreatedEvent" - poolReadyEvent monitoringEventType = "PoolReadyEvent" - poolClearedEvent monitoringEventType = "PoolClearedEvent" - poolClosedEvent monitoringEventType = "PoolClosedEvent" - connectionCreatedEvent monitoringEventType = "ConnectionCreatedEvent" - connectionReadyEvent monitoringEventType = "ConnectionReadyEvent" - connectionClosedEvent monitoringEventType = "ConnectionClosedEvent" - connectionCheckOutStartedEvent monitoringEventType = "ConnectionCheckOutStartedEvent" - connectionCheckOutFailedEvent monitoringEventType = "ConnectionCheckOutFailedEvent" - connectionCheckedOutEvent monitoringEventType = "ConnectionCheckedOutEvent" - connectionCheckedInEvent monitoringEventType = "ConnectionCheckedInEvent" - serverDescriptionChangedEvent monitoringEventType = "ServerDescriptionChangedEvent" + commandStartedEvent monitoringEventType = "CommandStartedEvent" + commandSucceededEvent monitoringEventType = "CommandSucceededEvent" + commandFailedEvent monitoringEventType = "CommandFailedEvent" + poolCreatedEvent monitoringEventType = "PoolCreatedEvent" + poolReadyEvent monitoringEventType = "PoolReadyEvent" + poolClearedEvent monitoringEventType = "PoolClearedEvent" + poolClosedEvent monitoringEventType = "PoolClosedEvent" + connectionCreatedEvent monitoringEventType = "ConnectionCreatedEvent" + connectionReadyEvent monitoringEventType = "ConnectionReadyEvent" + 
connectionClosedEvent monitoringEventType = "ConnectionClosedEvent" + connectionCheckOutStartedEvent monitoringEventType = "ConnectionCheckOutStartedEvent" + connectionCheckOutFailedEvent monitoringEventType = "ConnectionCheckOutFailedEvent" + connectionCheckedOutEvent monitoringEventType = "ConnectionCheckedOutEvent" + connectionCheckedInEvent monitoringEventType = "ConnectionCheckedInEvent" + serverDescriptionChangedEvent monitoringEventType = "ServerDescriptionChangedEvent" + serverHeartbeatFailedEvent monitoringEventType = "ServerHeartbeatFailedEvent" + serverHeartbeatStartedEvent monitoringEventType = "ServerHeartbeatStartedEvent" + serverHeartbeatSucceededEvent monitoringEventType = "ServerHeartbeatSucceededEvent" + topologyDescriptionChangedEvent monitoringEventType = "TopologyDescriptionChangedEvent" ) func monitoringEventTypeFromString(eventStr string) (monitoringEventType, bool) { @@ -64,6 +68,14 @@ func monitoringEventTypeFromString(eventStr string) (monitoringEventType, bool) return connectionCheckedInEvent, true case "serverdescriptionchangedevent": return serverDescriptionChangedEvent, true + case "serverheartbeatfailedevent": + return serverHeartbeatFailedEvent, true + case "serverheartbeatstartedevent": + return serverHeartbeatStartedEvent, true + case "serverheartbeatsucceededevent": + return serverHeartbeatSucceededEvent, true + case "topologydescriptionchangedevent": + return topologyDescriptionChangedEvent, true default: return "", false } diff --git a/mongo/integration/unified/logger_verification.go b/mongo/integration/unified/logger_verification.go index ad13d097fb..c6447749f5 100644 --- a/mongo/integration/unified/logger_verification.go +++ b/mongo/integration/unified/logger_verification.go @@ -44,10 +44,6 @@ func newLogMessage(level int, msg string, args ...interface{}) (*logMessage, err } } - if len(args) == 0 { - return logMessage, nil - } - // The argument slice must have an even number of elements, otherwise it // would not maintain the 
key-value structure of the document. if len(args)%2 != 0 { @@ -78,8 +74,20 @@ func newLogMessage(level int, msg string, args ...interface{}) (*logMessage, err // clientLogMessages is a struct representing the expected "LogMessages" for a // client. type clientLogMessages struct { - Client string `bson:"client"` - LogMessages []*logMessage `bson:"messages"` + Client string `bson:"client"` + IgnoreMessages []*logMessage `bson:"ignoreMessages"` + LogMessages []*logMessage `bson:"messages"` +} + +// ignore checks to see if the message is in the "IgnoreMessages" slice. +func (clm clientLogMessages) ignore(ctx context.Context, msg *logMessage) bool { + for _, ignoreMessage := range clm.IgnoreMessages { + if err := verifyLogMatch(ctx, ignoreMessage, msg); err == nil { + return true + } + } + + return false } // logMessageValidator defines the expectation for log messages across all @@ -182,6 +190,9 @@ type logQueues struct { unordered <-chan *logMessage } +// partitionLogQueue will partition the expected logs into "unordered" and +// "ordered" log channels. This function will also remove any logs in the +// "ignoreMessages" list for a client. func partitionLogQueue(ctx context.Context, exp *clientLogMessages) logQueues { orderedLogCh := make(chan *logMessage, len(exp.LogMessages)) unorderedLogCh := make(chan *logMessage, len(exp.LogMessages)) @@ -230,6 +241,12 @@ func matchOrderedLogs(ctx context.Context, logs logQueues) <-chan error { defer close(errs) for actual := range logs.ordered { + // Ignore logs that are in the "IgnoreMessages" slice of + // the expected results. 
+ if logs.expected.ignore(ctx, actual) { + continue + } + expected := expLogMessages[0] if expected == nil { continue diff --git a/mongo/integration/unified/logger_verification_test.go b/mongo/integration/unified/logger_verification_test.go new file mode 100644 index 0000000000..5defb8d1c0 --- /dev/null +++ b/mongo/integration/unified/logger_verification_test.go @@ -0,0 +1,84 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package unified + +import ( + "context" + "testing" + + "go.mongodb.org/mongo-driver/internal/assert" + "go.mongodb.org/mongo-driver/internal/logger" + "go.mongodb.org/mongo-driver/internal/require" +) + +func newTestLogMessage(t *testing.T, level int, msg string, args ...interface{}) *logMessage { + t.Helper() + + message, err := newLogMessage(level, msg, args...) 
+ require.NoError(t, err, "failed to create test log message") + + return message +} + +func TestClientLogMessagesIgnore(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + clm clientLogMessages + message *logMessage + want bool + }{ + { + name: "empty", + clm: clientLogMessages{}, + message: &logMessage{}, + want: false, + }, + { + name: "no match", + clm: clientLogMessages{ + IgnoreMessages: []*logMessage{ + newTestLogMessage(t, int(logger.LevelDebug), logger.CommandFailed), + }, + }, + message: newTestLogMessage(t, int(logger.LevelInfo), logger.CommandFailed), + want: false, + }, + { + name: "match", + clm: clientLogMessages{ + IgnoreMessages: []*logMessage{ + newTestLogMessage(t, int(logger.LevelDebug), logger.CommandStarted), + }, + }, + message: newTestLogMessage(t, int(logger.LevelDebug), logger.CommandStarted), + want: true, + }, + { + name: "match subset", + clm: clientLogMessages{ + IgnoreMessages: []*logMessage{ + newTestLogMessage(t, int(logger.LevelDebug), logger.CommandStarted), + }, + }, + message: newTestLogMessage(t, int(logger.LevelDebug), logger.CommandStarted, "extrakey", 1), + want: true, + }, + } + + for _, test := range tests { + test := test // capture the range variable + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + got := test.clm.ignore(context.Background(), test.message) + assert.Equal(t, test.want, got, "clientLogMessages.ignore() result") + }) + } +} diff --git a/mongo/integration/unified/schema_version.go b/mongo/integration/unified/schema_version.go index 83ca8d133c..c85a2efa79 100644 --- a/mongo/integration/unified/schema_version.go +++ b/mongo/integration/unified/schema_version.go @@ -16,7 +16,7 @@ import ( var ( supportedSchemaVersions = map[int]string{ - 1: "1.13", + 1: "1.16", } ) diff --git a/mongo/integration/unified/testrunner_operation.go b/mongo/integration/unified/testrunner_operation.go index 06a49eccc3..4362c21e95 100644 --- a/mongo/integration/unified/testrunner_operation.go +++ 
b/mongo/integration/unified/testrunner_operation.go @@ -172,6 +172,10 @@ func executeTestRunnerOperation(ctx context.Context, operation *operation, loopD for idx, entity := range createEntities { for entityType, entityOptions := range entity { + if entityType == "client" && hasOperationalFailpoint(ctx) { + entityOptions.setHeartbeatFrequencyMS(lowHeartbeatFrequency) + } + if err := entities(ctx).addEntity(ctx, entityType, entityOptions); err != nil { return fmt.Errorf("error creating entity at index %d: %v", idx, err) } diff --git a/mongo/integration/unified/unified_spec_runner.go b/mongo/integration/unified/unified_spec_runner.go index 43f1f83832..bac72bde27 100644 --- a/mongo/integration/unified/unified_spec_runner.go +++ b/mongo/integration/unified/unified_spec_runner.go @@ -33,10 +33,7 @@ var ( } logMessageValidatorTimeout = 10 * time.Millisecond -) - -const ( - lowHeartbeatFrequency int32 = 50 + lowHeartbeatFrequency = 50 * time.Millisecond ) // TestCase holds and runs a unified spec test case @@ -104,7 +101,7 @@ func runTestFile(t *testing.T, filepath string, expectValidFail bool, opts ...*O content, err := ioutil.ReadFile(filepath) assert.Nil(t, err, "ReadFile error for file %q: %v", filepath, err) - fileReqs, testCases := ParseTestFile(t, content, opts...) + fileReqs, testCases := ParseTestFile(t, content, expectValidFail, opts...) mtOpts := mtest.NewOptions(). RunOn(fileReqs...). @@ -160,11 +157,14 @@ func parseTestFile(testJSON []byte, opts ...*Options) ([]mtest.RunOnBlock, []*Te } // ParseTestFile create an array of TestCases from the testJSON json blob -func ParseTestFile(t *testing.T, testJSON []byte, opts ...*Options) ([]mtest.RunOnBlock, []*TestCase) { +func ParseTestFile(t *testing.T, testJSON []byte, expectValidFail bool, opts ...*Options) ([]mtest.RunOnBlock, []*TestCase) { t.Helper() runOnRequirements, testCases, err := parseTestFile(testJSON, opts...) 
- assert.NoError(t, err, "error parsing test file") + + if !expectValidFail { + assert.NoError(t, err, "error parsing test file") + } return runOnRequirements, testCases } @@ -228,7 +228,7 @@ func (tc *TestCase) Run(ls LoggerSkipper) error { expectedLogCount += len(clientLog.LogMessages) } - testCtx := newTestContext(context.Background(), tc.entities, expectedLogCount) + testCtx := newTestContext(context.Background(), tc.entities, expectedLogCount, tc.setsFailPoint()) defer func() { // If anything fails while doing test cleanup, we only log the error because the actual test may have already @@ -266,16 +266,10 @@ func (tc *TestCase) Run(ls LoggerSkipper) error { // a fail point, set a low heartbeatFrequencyMS value into the URI options map if one is not already present. // This speeds up recovery time for the client if the fail point forces the server to return a state change // error. - shouldSetHeartbeatFrequency := tc.setsFailPoint() for idx, entity := range tc.createEntities { for entityType, entityOptions := range entity { - if shouldSetHeartbeatFrequency && entityType == "client" { - if entityOptions.URIOptions == nil { - entityOptions.URIOptions = make(bson.M) - } - if _, ok := entityOptions.URIOptions["heartbeatFrequencyMS"]; !ok { - entityOptions.URIOptions["heartbeatFrequencyMS"] = lowHeartbeatFrequency - } + if entityType == "client" && hasOperationalFailpoint(testCtx) { + entityOptions.setHeartbeatFrequencyMS(lowHeartbeatFrequency) } if err := tc.entities.addEntity(testCtx, entityType, entityOptions); err != nil { diff --git a/mongo/integration/unified/unified_spec_test.go b/mongo/integration/unified/unified_spec_test.go index 8d59c9afbd..b56c15dd6a 100644 --- a/mongo/integration/unified/unified_spec_test.go +++ b/mongo/integration/unified/unified_spec_test.go @@ -29,6 +29,7 @@ var ( "client-side-encryption/unified", "client-side-operations-timeout", "gridfs", + "server-discovery-and-monitoring/unified", "run-command", } failDirectories = []string{ diff 
--git a/testdata/server-discovery-and-monitoring/unified/logging-loadbalanced.json b/testdata/server-discovery-and-monitoring/unified/logging-loadbalanced.json new file mode 100644 index 0000000000..45440d2557 --- /dev/null +++ b/testdata/server-discovery-and-monitoring/unified/logging-loadbalanced.json @@ -0,0 +1,150 @@ +{ + "description": "loadbalanced-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true 
+ }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/testdata/server-discovery-and-monitoring/unified/logging-loadbalanced.yml b/testdata/server-discovery-and-monitoring/unified/logging-loadbalanced.yml new file mode 100644 index 0000000000..87ad4bcf5d --- /dev/null +++ b/testdata/server-discovery-and-monitoring/unified/logging-loadbalanced.yml @@ -0,0 +1,74 @@ +description: "loadbalanced-logging" + +schemaVersion: "1.16" + +runOnRequirements: + - topologies: + - load-balanced + minServerVersion: "4.4" # awaitable hello + +tests: + - description: "Topology lifecycle" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeLogMessages: + topology: debug + observeEvents: + - topologyDescriptionChangedEvent + # ensure the topology has been fully discovered before closing the client. + # expected events are initial server discovery and server connect event. 
+ - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + topologyDescriptionChangedEvent: {} + count: 2 + - name: close + object: *client + expectLogMessages: + - client: *client + messages: + - level: debug + component: topology + data: + message: "Starting topology monitoring" + topologyId: { $$exists: true } + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } # unknown topology + newDescription: { $$exists: true } # unknown topology, disconnected server + - level: debug + component: topology + data: + message: "Starting server monitoring" + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } + newDescription: { $$exists: true } # loadBalanced topology + - level: debug + component: topology + data: + message: "Stopped server monitoring" + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + - level: debug + component: topology + data: + message: "Stopped topology monitoring" + topologyId: { $$exists: true } diff --git a/testdata/server-discovery-and-monitoring/unified/logging-replicaset.json b/testdata/server-discovery-and-monitoring/unified/logging-replicaset.json new file mode 100644 index 0000000000..e6738225cd --- /dev/null +++ b/testdata/server-discovery-and-monitoring/unified/logging-replicaset.json @@ -0,0 +1,606 @@ +{ + "description": "replicaset-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ], + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient" + } + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": 
"createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 4 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + 
"data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "Successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatSucceededEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 3 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + 
"ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + 
"int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + } + ] + } + ] + }, + { + "description": "Failing heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatFailedEvent" + ], + "uriOptions": { + "appname": "failingHeartbeatLoggingTest" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "failingHeartbeatLoggingTest", + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + 
"$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/testdata/server-discovery-and-monitoring/unified/logging-replicaset.yml b/testdata/server-discovery-and-monitoring/unified/logging-replicaset.yml new file mode 100644 index 0000000000..4644dc1355 --- /dev/null +++ b/testdata/server-discovery-and-monitoring/unified/logging-replicaset.yml @@ -0,0 +1,289 @@ +description: "replicaset-logging" + +schemaVersion: "1.16" + +runOnRequirements: + - topologies: + - replicaset + minServerVersion: "4.4" # awaitable hello + +createEntities: + - client: + id: &setupClient setupClient + +tests: + - description: "Topology lifecycle" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeLogMessages: + topology: debug + observeEvents: + - topologyDescriptionChangedEvent + # ensure the topology has been fully discovered before closing the client. + # expected events are initial cluster type change from unknown to ReplicaSet, and connect events for each of 3 servers. 
+ - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + topologyDescriptionChangedEvent: {} + count: 4 + - name: close + object: *client + expectLogMessages: + - client: *client + ignoreMessages: + - level: debug + component: topology + data: + message: "Starting server monitoring" + - level: debug + component: topology + data: + message: "Server heartbeat started" + - level: debug + component: topology + data: + message: "Server heartbeat succeeded" + - level: debug + component: topology + data: + message: "Server heartbeat failed" + messages: + - level: debug + component: topology + data: + message: "Starting topology monitoring" + topologyId: { $$exists: true } + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } # unknown topology + newDescription: { $$exists: true } # ReplicaSet topology + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } + newDescription: { $$exists: true } # server connected + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } + newDescription: { $$exists: true } # server connected + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } + newDescription: { $$exists: true } # server connected + - level: debug + component: topology + data: + message: "Stopped server monitoring" + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + - level: debug + component: topology + data: + message: "Stopped server monitoring" + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + - level: 
debug + component: topology + data: + message: "Stopped server monitoring" + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } # ReplicaSet topology + newDescription: { $$exists: true } # unknown topology + - level: debug + component: topology + data: + message: "Stopped topology monitoring" + topologyId: { $$exists: true } + - description: Successful heartbeat + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: *client + observeLogMessages: + topology: debug + observeEvents: + - serverHeartbeatSucceededEvent + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + serverHeartbeatSucceededEvent: {} + count: 3 + expectLogMessages: + - client: *client + ignoreExtraMessages: true + ignoreMessages: + - level: debug + component: topology + data: + message: "Server heartbeat started" + - level: debug + component: topology + data: + message: "Starting server monitoring" + - level: debug + component: topology + data: + message: "Stopped server monitoring" + - level: debug + component: topology + data: + message: "Topology description changed" + messages: + - level: debug + component: topology + data: + message: "Starting topology monitoring" + topologyId: { $$exists: true } + - level: debug + component: topology + data: + message: "Server heartbeat succeeded" + awaited: { $$exists: true } + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + driverConnectionId: { $$exists: true } + serverConnectionId: { $$exists: true } + durationMS: { $$type: [int, long] } + reply: + $$matchAsDocument: + "$$matchAsRoot": + ok: 1 + - level: debug + component: topology + data: + message: "Server heartbeat succeeded" + awaited: { $$exists: true } + 
topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + driverConnectionId: { $$exists: true } + serverConnectionId: { $$exists: true } + durationMS: { $$type: [int, long] } + reply: + $$matchAsDocument: + "$$matchAsRoot": + ok: 1 + - level: debug + component: topology + data: + message: "Server heartbeat succeeded" + awaited: { $$exists: true } + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + driverConnectionId: { $$exists: true } + serverConnectionId: { $$exists: true } + durationMS: { $$type: [int, long] } + reply: + $$matchAsDocument: + "$$matchAsRoot": + ok: 1 + - description: Failing heartbeat + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: *client + observeLogMessages: + topology: debug + observeEvents: + - serverHeartbeatFailedEvent + uriOptions: + appname: failingHeartbeatLoggingTest + - name: failPoint + object: testRunner + arguments: + failPoint: + configureFailPoint: failCommand + mode: "alwaysOn" + data: + failCommands: + - hello + - isMaster + appName: failingHeartbeatLoggingTest + closeConnection: true + client: *setupClient + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + serverHeartbeatFailedEvent: {} + count: 1 + expectLogMessages: + - client: *client + ignoreExtraMessages: true + ignoreMessages: + - level: debug + component: topology + data: + message: "Server heartbeat started" + - level: debug + component: topology + data: + message: "Server heartbeat succeeded" + - level: debug + component: topology + data: + message: "Starting server monitoring" + - level: debug + component: topology + data: + message: "Stopped server monitoring" + - level: debug + component: topology + data: + message: "Topology description changed" + messages: + - level: debug + component: topology + data: + message: "Starting topology monitoring" + topologyId: + "$$exists": true 
+ - level: debug + component: topology + data: + message: "Server heartbeat failed" + awaited: { $$exists: true } + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + driverConnectionId: { $$exists: true } + durationMS: { $$type: [int, long] } + failure: { $$exists: true } diff --git a/testdata/server-discovery-and-monitoring/unified/logging-sharded.json b/testdata/server-discovery-and-monitoring/unified/logging-sharded.json new file mode 100644 index 0000000000..61b27f5be0 --- /dev/null +++ b/testdata/server-discovery-and-monitoring/unified/logging-sharded.json @@ -0,0 +1,492 @@ +{ + "description": "sharded-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ], + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ], + "useMultipleMongoses": true + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 3 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": 
"Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "Successful heartbeat", + "operations": [ + { + "name": 
"createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatSucceededEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + } + ] + } + ] + }, + { + "description": "Failing heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + 
"serverHeartbeatStartedEvent", + "serverHeartbeatFailedEvent" + ], + "uriOptions": { + "appname": "failingHeartbeatLoggingTest" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "failingHeartbeatLoggingTest", + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git 
a/testdata/server-discovery-and-monitoring/unified/logging-sharded.yml b/testdata/server-discovery-and-monitoring/unified/logging-sharded.yml new file mode 100644 index 0000000000..34cd3611e2 --- /dev/null +++ b/testdata/server-discovery-and-monitoring/unified/logging-sharded.yml @@ -0,0 +1,248 @@ +description: "sharded-logging" + +schemaVersion: "1.16" + +runOnRequirements: + - topologies: + - sharded + minServerVersion: "4.4" # awaitable hello + +createEntities: + - client: + id: &setupClient setupClient + useMultipleMongoses: false + +tests: + - description: "Topology lifecycle" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeLogMessages: + topology: debug + observeEvents: + - topologyDescriptionChangedEvent + useMultipleMongoses: true + # ensure the topology has been fully discovered before closing the client. + # expected events are initial cluster type change from unknown to sharded, and connect events for each of 2 servers. 
+ - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + topologyDescriptionChangedEvent: {} + count: 3 + - name: close + object: *client + expectLogMessages: + - client: *client + ignoreMessages: + - level: debug + component: topology + data: + message: "Starting server monitoring" + - level: debug + component: topology + data: + message: "Server heartbeat started" + - level: debug + component: topology + data: + message: "Server heartbeat succeeded" + - level: debug + component: topology + data: + message: "Server heartbeat failed" + messages: + - level: debug + component: topology + data: + message: "Starting topology monitoring" + topologyId: { $$exists: true } + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } # unknown topology + newDescription: { $$exists: true } # Sharded topology + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } + newDescription: { $$exists: true } # shard router connected + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } + newDescription: { $$exists: true } # shard router connected + - level: debug + component: topology + data: + message: "Stopped server monitoring" + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + - level: debug + component: topology + data: + message: "Stopped server monitoring" + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } # Sharded topology + newDescription: { $$exists: true } # 
unknown topology + - level: debug + component: topology + data: + message: "Stopped topology monitoring" + topologyId: { $$exists: true } + - description: Successful heartbeat + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: *client + observeLogMessages: + topology: debug + observeEvents: + - serverHeartbeatSucceededEvent + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + serverHeartbeatSucceededEvent: {} + count: 1 + expectLogMessages: + - client: *client + ignoreExtraMessages: true + ignoreMessages: + - level: debug + component: topology + data: + message: "Server heartbeat started" + - level: debug + component: topology + data: + message: "Starting server monitoring" + - level: debug + component: topology + data: + message: "Stopped server monitoring" + - level: debug + component: topology + data: + message: "Topology description changed" + messages: + - level: debug + component: topology + data: + message: "Starting topology monitoring" + topologyId: { $$exists: true } + - level: debug + component: topology + data: + message: "Server heartbeat succeeded" + awaited: { $$exists: true } + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + driverConnectionId: { $$exists: true } + serverConnectionId: { $$exists: true } + durationMS: { $$type: [int, long] } + reply: + $$matchAsDocument: + "$$matchAsRoot": + ok: 1 + - description: Failing heartbeat + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: *client + observeLogMessages: + topology: debug + observeEvents: + - serverHeartbeatStartedEvent + - serverHeartbeatFailedEvent + uriOptions: + appname: failingHeartbeatLoggingTest + - name: failPoint + object: testRunner + arguments: + failPoint: + configureFailPoint: failCommand + mode: "alwaysOn" + data: + failCommands: + - hello + - isMaster + appName: failingHeartbeatLoggingTest 
+ closeConnection: true + client: *setupClient + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + serverHeartbeatFailedEvent: {} + count: 1 + expectLogMessages: + - client: *client + ignoreExtraMessages: true + ignoreMessages: + - level: debug + component: topology + data: + message: "Server heartbeat started" + - level: debug + component: topology + data: + message: "Server heartbeat succeeded" + - level: debug + component: topology + data: + message: "Starting server monitoring" + - level: debug + component: topology + data: + message: "Stopped server monitoring" + - level: debug + component: topology + data: + message: "Topology description changed" + messages: + - level: debug + component: topology + data: + message: "Starting topology monitoring" + topologyId: + "$$exists": true + - level: debug + component: topology + data: + message: "Server heartbeat failed" + awaited: { $$exists: true } + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + driverConnectionId: { $$exists: true } + durationMS: { $$type: [int, long] } + failure: { $$exists: true } diff --git a/testdata/server-discovery-and-monitoring/unified/logging-standalone.json b/testdata/server-discovery-and-monitoring/unified/logging-standalone.json new file mode 100644 index 0000000000..1ee6dbe899 --- /dev/null +++ b/testdata/server-discovery-and-monitoring/unified/logging-standalone.json @@ -0,0 +1,517 @@ +{ + "description": "standalone-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "single" + ], + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient" + } + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + 
"topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description 
changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "Successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatSucceededEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + 
"serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + } + ] + } + ] + }, + { + "description": "Failing heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatFailedEvent" + ], + "uriOptions": { + "appname": "failingHeartbeatLoggingTest" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "failingHeartbeatLoggingTest", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + 
"message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/testdata/server-discovery-and-monitoring/unified/logging-standalone.yml b/testdata/server-discovery-and-monitoring/unified/logging-standalone.yml new file mode 100644 index 0000000000..95c2676d44 --- /dev/null +++ b/testdata/server-discovery-and-monitoring/unified/logging-standalone.yml @@ -0,0 +1,258 @@ +description: "standalone-logging" + +schemaVersion: "1.16" + +runOnRequirements: + - topologies: + - single + minServerVersion: "4.4" # awaitable hello + +createEntities: + - client: + id: &setupClient setupClient + +tests: + - description: "Topology lifecycle" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeLogMessages: + topology: debug + observeEvents: + - topologyDescriptionChangedEvent + # ensure the topology has been fully discovered before closing the client. 
+ # expected events are initial server discovery and server connect event. + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + topologyDescriptionChangedEvent: {} + count: 2 + - name: close + object: *client + expectLogMessages: + - client: *client + ignoreMessages: + - level: debug + component: topology + data: + message: "Server heartbeat started" + - level: debug + component: topology + data: + message: "Server heartbeat succeeded" + - level: debug + component: topology + data: + message: "Server heartbeat failed" + messages: + - level: debug + component: topology + data: + message: "Starting topology monitoring" + topologyId: { $$exists: true } + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } # unknown topology + newDescription: { $$exists: true } # unknown topology, disconnected server + - level: debug + component: topology + data: + message: "Starting server monitoring" + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } + newDescription: { $$exists: true } # standalone topology + - level: debug + component: topology + data: + message: "Stopped server monitoring" + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + - level: debug + component: topology + data: + message: "Topology description changed" + topologyId: { $$exists: true } + previousDescription: { $$exists: true } # standalone topology + newDescription: { $$exists: true } # unknown topology + - level: debug + component: topology + data: + message: "Stopped topology monitoring" + topologyId: { $$exists: true } + - description: Successful heartbeat + operations: + - name: createEntities + 
object: testRunner + arguments: + entities: + - client: + id: &client client + observeLogMessages: + topology: debug + observeEvents: + - serverHeartbeatSucceededEvent + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + serverHeartbeatSucceededEvent: {} + count: 1 + - name: close + object: *client + expectLogMessages: + - client: *client + ignoreExtraMessages: true + ignoreMessages: + - level: debug + component: topology + data: + message: "Topology description changed" + - level: debug + component: topology + data: + message: "Stopped topology monitoring" + - level: debug + component: topology + data: + message: "Stopped server monitoring" + - level: debug + component: topology + data: + message: "Topology description changed" + - level: debug + component: topology + data: + message: "Starting server monitoring" + - level: debug + component: topology + data: + message: "Starting topology monitoring" + - level: debug + component: topology + data: + message: "Server heartbeat failed" + messages: + - level: debug + component: topology + data: + message: "Server heartbeat started" + awaited: { $$exists: true } + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + driverConnectionId: { $$exists: true } + - level: debug + component: topology + data: + message: "Server heartbeat succeeded" + awaited: { $$exists: true } + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + driverConnectionId: { $$exists: true } + serverConnectionId: { $$exists: true } + durationMS: { $$type: [int, long] } + reply: + $$matchAsDocument: + "$$matchAsRoot": + ok: 1 + - description: Failing heartbeat + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeLogMessages: + topology: debug + observeEvents: + - serverHeartbeatFailedEvent + uriOptions: + appname: failingHeartbeatLoggingTest + - 
name: failPoint + object: testRunner + arguments: + client: *setupClient + failPoint: + configureFailPoint: failCommand + mode: "alwaysOn" + data: + failCommands: + - hello + - isMaster + appName: failingHeartbeatLoggingTest + closeConnection: true + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + serverHeartbeatFailedEvent: {} + count: 1 + expectLogMessages: + - client: *client + ignoreExtraMessages: true + ignoreMessages: + - level: debug + component: topology + data: + message: "Topology description changed" + - level: debug + component: topology + data: + message: "Stopped topology monitoring" + - level: debug + component: topology + data: + message: "Stopped server monitoring" + - level: debug + component: topology + data: + message: "Topology description changed" + - level: debug + component: topology + data: + message: "Starting server monitoring" + - level: debug + component: topology + data: + message: "Starting topology monitoring" + - level: debug + component: topology + data: + message: "Server heartbeat started" + - level: debug + component: topology + data: + message: "Server heartbeat succeeded" + messages: + - level: debug + component: topology + data: + message: "Server heartbeat failed" + awaited: { $$exists: true } + topologyId: { $$exists: true } + serverHost: { $$type: string } + serverPort: { $$type: [int, long] } + driverConnectionId: { $$exists: true } + durationMS: { $$type: [int, long] } + failure: { $$exists: true } diff --git a/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreExtraMessages-type.json b/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreExtraMessages-type.json new file mode 100644 index 0000000000..a9f2da9bce --- /dev/null +++ b/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreExtraMessages-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessagesForClient-ignoreExtraMessages-type", + "schemaVersion": 
"1.16", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "ignoreExtraMessages": "true", + "messages": [] + } + ] + } + ] +} diff --git a/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreExtraMessages-type.yml b/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreExtraMessages-type.yml new file mode 100644 index 0000000000..3e11d35d82 --- /dev/null +++ b/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreExtraMessages-type.yml @@ -0,0 +1,15 @@ +description: "expectedLogMessagesForClient-ignoreExtraMessages-type" + +schemaVersion: "1.16" + +createEntities: + - client: + id: &client0 "client0" + +tests: + - description: "foo" + operations: [] + expectLogMessages: + - client: *client0 + ignoreExtraMessages: "true" + messages: [] diff --git a/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-items.json b/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-items.json new file mode 100644 index 0000000000..345faf41f5 --- /dev/null +++ b/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-items.json @@ -0,0 +1,26 @@ +{ + "description": "expectedLogMessagesForClient-ignoreMessages-items", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "ignoreMessages": [ + 0 + ] + } + ] + } + ] +} diff --git a/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-items.yml b/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-items.yml new file mode 100644 index 0000000000..f429c3e5bf --- /dev/null +++ 
b/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-items.yml @@ -0,0 +1,15 @@ +description: "expectedLogMessagesForClient-ignoreMessages-items" + +schemaVersion: "1.16" + +createEntities: + - client: + id: &client0 "client0" + +tests: + - description: "foo" + operations: [] + expectLogMessages: + - client: *client0 + messages: [] + ignoreMessages: [0] diff --git a/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-type.json b/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-type.json new file mode 100644 index 0000000000..4bc2d41dbf --- /dev/null +++ b/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessagesForClient-ignoreMessages-type", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "ignoreMessages": 0 + } + ] + } + ] +} diff --git a/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-type.yml b/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-type.yml new file mode 100644 index 0000000000..9512f3358e --- /dev/null +++ b/testdata/unified-test-format/valid-fail/expectedLogMessagesForClient-ignoreMessages-type.yml @@ -0,0 +1,15 @@ +description: "expectedLogMessagesForClient-ignoreMessages-type" + +schemaVersion: "1.16" + +createEntities: + - client: + id: &client0 "client0" + +tests: + - description: "foo" + operations: [] + expectLogMessages: + - client: *client0 + messages: [] + ignoreMessages: 0 diff --git a/x/mongo/driver/topology/server.go b/x/mongo/driver/topology/server.go index f0a1c5b05c..600797df40 100644 --- a/x/mongo/driver/topology/server.go +++ b/x/mongo/driver/topology/server.go @@ -17,6 +17,7 @@ import ( 
"go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/mongo/driver" @@ -188,6 +189,39 @@ func NewServer(addr address.Address, topologyID primitive.ObjectID, opts ...Serv return s } +func mustLogServerMessage(srv *Server) bool { + return srv.cfg.logger != nil && srv.cfg.logger.LevelComponentEnabled( + logger.LevelDebug, logger.ComponentTopology) +} + +func logServerMessage(srv *Server, msg string, keysAndValues ...interface{}) { + serverHost, serverPort, err := net.SplitHostPort(srv.address.String()) + if err != nil { + serverHost = srv.address.String() + serverPort = "" + } + + var driverConnectionID uint64 + var serverConnectionID *int64 + + if srv.conn != nil { + driverConnectionID = srv.conn.driverConnectionID + serverConnectionID = srv.conn.serverConnectionID + } + + srv.cfg.logger.Print(logger.LevelDebug, + logger.ComponentTopology, + msg, + logger.SerializeServer(logger.Server{ + DriverConnectionID: driverConnectionID, + TopologyID: srv.topologyID, + Message: msg, + ServerConnectionID: serverConnectionID, + ServerHost: serverHost, + ServerPort: serverPort, + }, keysAndValues...)...) +} + // Connect initializes the Server by starting background monitoring goroutines. // This method must be called before a Server can be used. func (s *Server) Connect(updateCallback updateTopologyCallback) error { @@ -497,7 +531,7 @@ func (s *Server) ProcessError(err error, conn driver.Connection) driver.ProcessE return driver.ConnectionPoolCleared } -// update handles performing heartbeats and updating any subscribers of the +// update handle performing heartbeats and updating any subscribers of the // newest description.Server retrieved. 
func (s *Server) update() { defer s.closewg.Done() @@ -947,6 +981,10 @@ func (s *Server) publishServerOpeningEvent(addr address.Address) { if s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerOpening != nil { s.cfg.serverMonitor.ServerOpening(serverOpening) } + + if mustLogServerMessage(s) { + logServerMessage(s, logger.TopologyServerOpening) + } } // publishes a ServerHeartbeatStartedEvent to indicate a hello command has started @@ -959,6 +997,11 @@ func (s *Server) publishServerHeartbeatStartedEvent(connectionID string, await b if s != nil && s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerHeartbeatStarted != nil { s.cfg.serverMonitor.ServerHeartbeatStarted(serverHeartbeatStarted) } + + if mustLogServerMessage(s) { + logServerMessage(s, logger.TopologyServerHeartbeatStarted, + logger.KeyAwaited, await) + } } // publishes a ServerHeartbeatSucceededEvent to indicate hello has succeeded @@ -978,6 +1021,13 @@ func (s *Server) publishServerHeartbeatSucceededEvent(connectionID string, if s != nil && s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerHeartbeatSucceeded != nil { s.cfg.serverMonitor.ServerHeartbeatSucceeded(serverHeartbeatSucceeded) } + + if mustLogServerMessage(s) { + logServerMessage(s, logger.TopologyServerHeartbeatStarted, + logger.KeyAwaited, await, + logger.KeyDurationMS, duration.Milliseconds(), + logger.KeyReply, desc) + } } // publishes a ServerHeartbeatFailedEvent to indicate hello has failed @@ -997,6 +1047,13 @@ func (s *Server) publishServerHeartbeatFailedEvent(connectionID string, if s != nil && s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerHeartbeatFailed != nil { s.cfg.serverMonitor.ServerHeartbeatFailed(serverHeartbeatFailed) } + + if mustLogServerMessage(s) { + logServerMessage(s, logger.TopologyServerHeartbeatFailed, + logger.KeyAwaited, await, + logger.KeyDurationMS, duration.Milliseconds(), + logger.KeyFailure, err.Error()) + } } // unwrapConnectionError returns the connection error wrapped by err, or nil 
if err does not wrap a connection error. diff --git a/x/mongo/driver/topology/topology.go b/x/mongo/driver/topology/topology.go index d5a27cbb3a..3ed598f381 100644 --- a/x/mongo/driver/topology/topology.go +++ b/x/mongo/driver/topology/topology.go @@ -23,6 +23,7 @@ import ( "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/internal/randutil" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" @@ -157,6 +158,21 @@ func New(cfg *Config) (*Topology, error) { return t, nil } +func mustLogTopologyMessage(topo *Topology) bool { + return topo.cfg.logger != nil && topo.cfg.logger.LevelComponentEnabled( + logger.LevelDebug, logger.ComponentTopology) +} + +func logTopologyMessage(topo *Topology, msg string, keysAndValues ...interface{}) { + topo.cfg.logger.Print(logger.LevelDebug, + logger.ComponentTopology, + msg, + logger.SerializeTopology(logger.Topology{ + ID: topo.id, + Message: msg, + }, keysAndValues...)...) +} + // Connect initializes a Topology and starts the monitoring process. This function // must be called to properly monitor the topology. 
func (t *Topology) Connect() error { @@ -818,6 +834,18 @@ func (t *Topology) publishServerClosedEvent(addr address.Address) { if t.cfg.ServerMonitor != nil && t.cfg.ServerMonitor.ServerClosed != nil { t.cfg.ServerMonitor.ServerClosed(serverClosed) } + + if mustLogTopologyMessage(t) { + serverHost, serverPort, err := net.SplitHostPort(addr.String()) + if err != nil { + serverHost = addr.String() + serverPort = "" + } + + logTopologyMessage(t, logger.TopologyServerClosed, + logger.KeyServerHost, serverHost, + logger.KeyServerPort, serverPort) + } } // publishes a TopologyDescriptionChangedEvent to indicate the topology description has changed @@ -831,6 +859,12 @@ func (t *Topology) publishTopologyDescriptionChangedEvent(prev description.Topol if t.cfg.ServerMonitor != nil && t.cfg.ServerMonitor.TopologyDescriptionChanged != nil { t.cfg.ServerMonitor.TopologyDescriptionChanged(topologyDescriptionChanged) } + + if mustLogTopologyMessage(t) { + logTopologyMessage(t, logger.TopologyDescriptionChanged, + logger.KeyPreviousDescription, prev.String(), + logger.KeyNewDescription, current.String()) + } } // publishes a TopologyOpeningEvent to indicate the topology is being initialized @@ -842,6 +876,10 @@ func (t *Topology) publishTopologyOpeningEvent() { if t.cfg.ServerMonitor != nil && t.cfg.ServerMonitor.TopologyOpening != nil { t.cfg.ServerMonitor.TopologyOpening(topologyOpening) } + + if mustLogTopologyMessage(t) { + logTopologyMessage(t, logger.TopologyOpening) + } } // publishes a TopologyClosedEvent to indicate the topology has been closed @@ -853,4 +891,8 @@ func (t *Topology) publishTopologyClosedEvent() { if t.cfg.ServerMonitor != nil && t.cfg.ServerMonitor.TopologyClosed != nil { t.cfg.ServerMonitor.TopologyClosed(topologyClosed) } + + if mustLogTopologyMessage(t) { + logTopologyMessage(t, logger.TopologyClosed) + } } diff --git a/x/mongo/driver/topology/topology_options.go b/x/mongo/driver/topology/topology_options.go index 67e6cbf9fd..87ce73c4e6 100644 --- 
a/x/mongo/driver/topology/topology_options.go +++ b/x/mongo/driver/topology/topology_options.go @@ -38,6 +38,7 @@ type Config struct { SRVMaxHosts int SRVServiceName string LoadBalanced bool + logger *logger.Logger } // ConvertToDriverAPIOptions converts a options.ServerAPIOptions instance to a driver.ServerAPIOptions. @@ -52,6 +53,20 @@ func ConvertToDriverAPIOptions(s *options.ServerAPIOptions) *driver.ServerAPIOpt return driverOpts } +func newLogger(opts options.LoggerOptions) (*logger.Logger, error) { + componentLevels := make(map[logger.Component]logger.Level) + for component, level := range opts.ComponentLevels { + componentLevels[logger.Component(component)] = logger.Level(level) + } + + log, err := logger.New(opts.Sink, opts.MaxDocumentLength, componentLevels) + if err != nil { + return nil, fmt.Errorf("error creating logger: %w", err) + } + + return log, nil +} + // NewConfig will translate data from client options into a topology config for building non-default deployments. // Server and topoplogy options are not honored if a custom deployment is used. func NewConfig(co *options.ClientOptions, clock *session.ClusterClock) (*Config, error) { @@ -336,21 +351,17 @@ func NewConfig(co *options.ClientOptions, clock *session.ClusterClock) (*Config, } if opts := co.LoggerOptions; opts != nil { - // Build an internal component-level mapping. 
- componentLevels := make(map[logger.Component]logger.Level) - for component, level := range opts.ComponentLevels { - componentLevels[logger.Component(component)] = logger.Level(level) - } - - log, err := logger.New(opts.Sink, opts.MaxDocumentLength, componentLevels) + lgr, err := newLogger(*opts) if err != nil { - return nil, fmt.Errorf("error creating logger: %w", err) + return nil, err } serverOpts = append( serverOpts, - withLogger(func() *logger.Logger { return log }), + withLogger(func() *logger.Logger { return lgr }), ) + + cfgp.logger = lgr } serverOpts = append( From 6d814184973ae7ffc13281c93b423dc522a4bcca Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 19 Jul 2023 14:26:41 -0500 Subject: [PATCH 12/15] GODRIVER-2916 Clean up Lambda Test Script (#1329) Co-authored-by: Preston Vasquez --- .evergreen/config.yml | 84 +++++++++++++++++++++++++------------------ 1 file changed, 50 insertions(+), 34 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8513e5ba22..ab6289576a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1184,33 +1184,6 @@ functions: ${PREPARE_SHELL} ${PROJECT_DIRECTORY}/.evergreen/run-fuzz.sh - run-deployed-aws-lambda-tests: - - command: ec2.assume_role - params: - role_arn: ${LAMBDA_AWS_ROLE_ARN} - duration_seconds: 3600 - - command: shell.exec - params: - working_dir: src/go.mongodb.org/mongo-driver - shell: bash - env: - PROJECT_DIRECTORY: ${PROJECT_DIRECTORY} - DRIVERS_TOOLS: ${DRIVERS_TOOLS} - DRIVERS_ATLAS_PUBLIC_API_KEY: ${DRIVERS_ATLAS_PUBLIC_API_KEY} - DRIVERS_ATLAS_PRIVATE_API_KEY: ${DRIVERS_ATLAS_PRIVATE_API_KEY} - DRIVERS_ATLAS_LAMBDA_USER: ${DRIVERS_ATLAS_LAMBDA_USER} - DRIVERS_ATLAS_LAMBDA_PASSWORD: ${DRIVERS_ATLAS_LAMBDA_PASSWORD} - DRIVERS_ATLAS_GROUP_ID: ${DRIVERS_ATLAS_GROUP_ID} - AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID} - AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY} - AWS_SESSION_TOKEN: ${AWS_SESSION_TOKEN} - TEST_LAMBDA_DIRECTORY: 
${PROJECT_DIRECTORY}/internal/test/faas/awslambda - LAMBDA_STACK_NAME: dbx-go-lambda - AWS_REGION: us-east-1 - script: | - ${PREPARE_SHELL} - .evergreen/run-deployed-lambda-aws-tests.sh - pre: - func: fetch-source - func: prepare-resources @@ -2279,13 +2252,24 @@ tasks: - func: bootstrap-mongo-orchestration - func: run-fuzz-tests - - name: "test-aws-lambda" - tags: - - latest - - lambda + - name: "test-aws-lambda-deployed" commands: - - func: bootstrap-mongo-orchestration - - func: run-deployed-aws-lambda-tests + - command: ec2.assume_role + params: + role_arn: ${LAMBDA_AWS_ROLE_ARN} + duration_seconds: 3600 + - command: shell.exec + params: + working_dir: src/go.mongodb.org/mongo-driver + shell: bash + add_expansions_to_env: true + env: + TEST_LAMBDA_DIRECTORY: ${PROJECT_DIRECTORY}/internal/test/faas/awslambda + LAMBDA_STACK_NAME: dbx-go-lambda + AWS_REGION: us-east-1 + script: | + ${PREPARE_SHELL} + ./.evergreen/run-deployed-lambda-aws-tests.sh axes: - id: version @@ -2611,6 +2595,38 @@ task_groups: tasks: - testazurekms-task + - name: test-aws-lambda-task-group + setup_group: + - func: fetch-source + - func: prepare-resources + - command: subprocess.exec + params: + working_dir: src/go.mongodb.org/mongo-driver + binary: bash + add_expansions_to_env: true + env: + LAMBDA_STACK_NAME: dbx-go-lambda + AWS_REGION: us-east-1 + args: + - ${DRIVERS_TOOLS}/.evergreen/atlas/setup-atlas-cluster.sh + - command: expansions.update + params: + file: src/go.mongodb.org/mongo-driver/atlas-expansion.yml + teardown_group: + - command: subprocess.exec + params: + working_dir: src/go.mongodb.org/mongo-driver + binary: bash + add_expansions_to_env: true + env: + LAMBDA_STACK_NAME: dbx-go-lambda + AWS_REGION: us-east-1 + args: + - ${DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + tasks: + - test-aws-lambda-deployed buildvariants: - name: static-analysis @@ -2759,7 +2775,7 @@ buildvariants: matrix_spec: { 
version: ["latest"], os-faas-80: ["rhel80-large-go-1-20"] } display_name: "FaaS ${version} ${os-faas-80}" tasks: - - test-aws-lambda + - test-aws-lambda-task-group - name: testgcpkms-variant display_name: "GCP KMS" From ecd5980a3e5aa97886ffa089e4a46666dc8a5d0f Mon Sep 17 00:00:00 2001 From: Preston Vasquez Date: Thu, 20 Jul 2023 09:27:49 -0600 Subject: [PATCH 13/15] GODRIVER-2891 Add documentation for log levels (#1312) Co-authored-by: Steven Silvester Co-authored-by: Matt Dale <9760375+matthewdale@users.noreply.github.com> --- mongo/options/loggeroptions.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/mongo/options/loggeroptions.go b/mongo/options/loggeroptions.go index 4a33e449a5..b837935812 100644 --- a/mongo/options/loggeroptions.go +++ b/mongo/options/loggeroptions.go @@ -51,6 +51,18 @@ type LogSink interface { // Info logs a non-error message with the given key/value pairs. This // method will only be called if the provided level has been defined // for a component in the LoggerOptions. + // + // Here are the following level mappings for V = "Verbosity": + // + // - V(0): off + // - V(1): informational + // - V(2): debugging + // + // This level mapping is taken from the go-logr/logr library + // specifications, specifically: + // + // "Level V(0) is the default, and logger.V(0).Info() has the same + // meaning as logger.Info()." 
Info(level int, message string, keysAndValues ...interface{}) // Error logs an error message with the given key/value pairs From 85d84cd7b0d0788661a156e98b4e5592128aafc8 Mon Sep 17 00:00:00 2001 From: Qingyang Hu <103950869+qingyang-hu@users.noreply.github.com> Date: Thu, 20 Jul 2023 16:05:56 -0400 Subject: [PATCH 14/15] GODRIVER-2752 Add databaseName field to Command Events (#1298) --- event/monitoring.go | 1 + internal/logger/component.go | 2 ++ internal/logger/component_test.go | 3 +++ .../integration/unified/event_verification.go | 10 +++++++ testdata/command-monitoring/find.json | 26 ++++++++++++------- testdata/command-monitoring/find.yml | 10 ++++++- .../command-monitoring/logging/command.json | 2 ++ .../command-monitoring/logging/command.yml | 2 ++ x/mongo/driver/operation.go | 7 +++-- 9 files changed, 51 insertions(+), 12 deletions(-) diff --git a/event/monitoring.go b/event/monitoring.go index 195fb6b46c..53d1caf2e3 100644 --- a/event/monitoring.go +++ b/event/monitoring.go @@ -43,6 +43,7 @@ type CommandFinishedEvent struct { DurationNanos int64 Duration time.Duration CommandName string + DatabaseName string RequestID int64 ConnectionID string // ServerConnectionID contains the connection ID from the server of the operation. If the server does not return diff --git a/internal/logger/component.go b/internal/logger/component.go index fdd6d42405..079daf50f8 100644 --- a/internal/logger/component.go +++ b/internal/logger/component.go @@ -137,6 +137,7 @@ type Command struct { // TODO(GODRIVER-2824): change the DriverConnectionID type to int64. 
DriverConnectionID uint64 // Driver's ID for the connection Name string // Command name + DatabaseName string // Database name Message string // Message associated with the command OperationID int32 // Driver-generated operation ID RequestID int64 // Driver-generated request ID @@ -153,6 +154,7 @@ func SerializeCommand(cmd Command, extraKeysAndValues ...interface{}) KeyValues // Initialize the boilerplate keys and values. keysAndValues := KeyValues{ KeyCommandName, cmd.Name, + KeyDatabaseName, cmd.DatabaseName, KeyDriverConnectionID, cmd.DriverConnectionID, KeyMessage, cmd.Message, KeyOperationID, cmd.OperationID, diff --git a/internal/logger/component_test.go b/internal/logger/component_test.go index e7a1942c3d..cf752d2ad7 100644 --- a/internal/logger/component_test.go +++ b/internal/logger/component_test.go @@ -38,6 +38,7 @@ func TestSerializeCommand(t *testing.T) { name: "empty", want: KeyValues{ KeyCommandName, "", + KeyDatabaseName, "", KeyDriverConnectionID, uint64(0), KeyMessage, "", KeyOperationID, int32(0), @@ -50,6 +51,7 @@ func TestSerializeCommand(t *testing.T) { cmd: Command{ DriverConnectionID: 1, Name: "foo", + DatabaseName: "db", Message: "bar", OperationID: 2, RequestID: 3, @@ -60,6 +62,7 @@ func TestSerializeCommand(t *testing.T) { }, want: KeyValues{ KeyCommandName, "foo", + KeyDatabaseName, "db", KeyDriverConnectionID, uint64(1), KeyMessage, "bar", KeyOperationID, int32(2), diff --git a/mongo/integration/unified/event_verification.go b/mongo/integration/unified/event_verification.go index dd6eca8b2c..91f7452907 100644 --- a/mongo/integration/unified/event_verification.go +++ b/mongo/integration/unified/event_verification.go @@ -28,6 +28,7 @@ type commandMonitoringEvent struct { CommandSucceededEvent *struct { CommandName *string `bson:"commandName"` + DatabaseName *string `bson:"databaseName"` Reply bson.Raw `bson:"reply"` HasServerConnectionID *bool `bson:"hasServerConnectionId"` HasServiceID *bool `bson:"hasServiceId"` @@ -35,6 +36,7 @@ type 
commandMonitoringEvent struct { CommandFailedEvent *struct { CommandName *string `bson:"commandName"` + DatabaseName *string `bson:"databaseName"` HasServerConnectionID *bool `bson:"hasServerConnectionId"` HasServiceID *bool `bson:"hasServiceId"` } `bson:"commandFailedEvent"` @@ -198,6 +200,10 @@ func verifyCommandEvents(ctx context.Context, client *clientEntity, expectedEven return newEventVerificationError(idx, client, "expected command name %q, got %q", *expected.CommandName, actual.CommandName) } + if expected.DatabaseName != nil && *expected.DatabaseName != actual.DatabaseName { + return newEventVerificationError(idx, client, "expected database name %q, got %q", *expected.DatabaseName, + actual.DatabaseName) + } if expected.Reply != nil { expectedDoc := documentToRawValue(expected.Reply) actualDoc := documentToRawValue(actual.Reply) @@ -238,6 +244,10 @@ func verifyCommandEvents(ctx context.Context, client *clientEntity, expectedEven return newEventVerificationError(idx, client, "expected command name %q, got %q", *expected.CommandName, actual.CommandName) } + if expected.DatabaseName != nil && *expected.DatabaseName != actual.DatabaseName { + return newEventVerificationError(idx, client, "expected database name %q, got %q", *expected.DatabaseName, + actual.DatabaseName) + } if expected.HasServiceID != nil { if err := verifyServiceID(*expected.HasServiceID, actual.ServiceID); err != nil { return newEventVerificationError(idx, client, "error verifying serviceID: %v", err) diff --git a/testdata/command-monitoring/find.json b/testdata/command-monitoring/find.json index 4b5f45ae99..bc9668499b 100644 --- a/testdata/command-monitoring/find.json +++ b/testdata/command-monitoring/find.json @@ -1,6 +1,6 @@ { "description": "find", - "schemaVersion": "1.1", + "schemaVersion": "1.15", "createEntities": [ { "client": { @@ -103,7 +103,8 @@ ] } }, - "commandName": "find" + "commandName": "find", + "databaseName": "command-monitoring-tests" } } ] @@ -198,7 +199,8 @@ ] } }, - 
"commandName": "find" + "commandName": "find", + "databaseName": "command-monitoring-tests" } } ] @@ -262,7 +264,8 @@ ] } }, - "commandName": "find" + "commandName": "find", + "databaseName": "command-monitoring-tests" } } ] @@ -338,7 +341,8 @@ ] } }, - "commandName": "find" + "commandName": "find", + "databaseName": "command-monitoring-tests" } }, { @@ -376,7 +380,8 @@ ] } }, - "commandName": "getMore" + "commandName": "getMore", + "databaseName": "command-monitoring-tests" } } ] @@ -464,7 +469,8 @@ ] } }, - "commandName": "find" + "commandName": "find", + "databaseName": "command-monitoring-tests" } }, { @@ -498,7 +504,8 @@ ] } }, - "commandName": "getMore" + "commandName": "getMore", + "databaseName": "command-monitoring-tests" } } ] @@ -539,7 +546,8 @@ }, { "commandFailedEvent": { - "commandName": "find" + "commandName": "find", + "databaseName": "command-monitoring-tests" } } ] diff --git a/testdata/command-monitoring/find.yml b/testdata/command-monitoring/find.yml index e2bb3f8c92..479e4a460c 100644 --- a/testdata/command-monitoring/find.yml +++ b/testdata/command-monitoring/find.yml @@ -1,6 +1,6 @@ description: "find" -schemaVersion: "1.1" +schemaVersion: "1.15" createEntities: - client: @@ -56,6 +56,7 @@ tests: firstBatch: - { _id: 1, x: 11 } commandName: find + databaseName: *databaseName - description: "A successful find with options" operations: @@ -98,6 +99,7 @@ tests: - { x: 33 } - { x: 22 } commandName: find + databaseName: *databaseName - description: "A successful find with showRecordId and returnKey" operations: @@ -131,6 +133,7 @@ tests: - { _id: 4 } - { _id: 5 } commandName: find + databaseName: *databaseName - description: "A successful find with a getMore" operations: @@ -162,6 +165,7 @@ tests: - { _id: 2, x: 22 } - { _id: 3, x: 33 } commandName: find + databaseName: *databaseName - commandStartedEvent: command: getMore: { $$type: [ int, long ] } @@ -179,6 +183,7 @@ tests: - { _id: 4, x: 44 } - { _id: 5, x: 55 } commandName: getMore + 
databaseName: *databaseName - description: "A successful find event with a getmore and the server kills the cursor (<= 4.4)" runOnRequirements: @@ -216,6 +221,7 @@ tests: - { _id: 2, x: 22 } - { _id: 3, x: 33 } commandName: find + databaseName: *databaseName - commandStartedEvent: command: getMore: { $$type: [ int, long ] } @@ -232,6 +238,7 @@ tests: nextBatch: - { _id: 4, x: 44 } commandName: getMore + databaseName: *databaseName - description: "A failed find event" operations: @@ -252,3 +259,4 @@ tests: databaseName: *databaseName - commandFailedEvent: commandName: find + databaseName: *databaseName diff --git a/testdata/command-monitoring/logging/command.json b/testdata/command-monitoring/logging/command.json index 3d5c2570be..d2970df692 100644 --- a/testdata/command-monitoring/logging/command.json +++ b/testdata/command-monitoring/logging/command.json @@ -93,6 +93,7 @@ "component": "command", "data": { "message": "Command succeeded", + "databaseName": "logging-tests", "commandName": "ping", "reply": { "$$type": "string" @@ -177,6 +178,7 @@ "component": "command", "data": { "message": "Command failed", + "databaseName": "logging-tests", "commandName": "find", "failure": { "$$exists": true diff --git a/testdata/command-monitoring/logging/command.yml b/testdata/command-monitoring/logging/command.yml index b57a3d15a4..3e3410b06d 100644 --- a/testdata/command-monitoring/logging/command.yml +++ b/testdata/command-monitoring/logging/command.yml @@ -52,6 +52,7 @@ tests: component: command data: message: "Command succeeded" + databaseName: *databaseName commandName: *commandName reply: { $$type: string } requestId: { $$type: [int, long] } @@ -85,6 +86,7 @@ tests: component: command data: message: "Command failed" + databaseName: *databaseName commandName: *commandName failure: { $$exists: true } requestId: { $$type: [int, long] } diff --git a/x/mongo/driver/operation.go b/x/mongo/driver/operation.go index 4f998f7cc7..2356400246 100644 --- a/x/mongo/driver/operation.go 
+++ b/x/mongo/driver/operation.go @@ -1795,14 +1795,14 @@ func (op Operation) publishStartedEvent(ctx context.Context, info startedInforma DriverConnectionID: info.driverConnectionID, Message: logger.CommandStarted, Name: info.cmdName, + DatabaseName: op.Database, RequestID: int64(info.requestID), ServerConnectionID: info.serverConnID, ServerHost: host, ServerPort: port, ServiceID: info.serviceID, }, - logger.KeyCommand, formattedCmd, - logger.KeyDatabaseName, op.Database)...) + logger.KeyCommand, formattedCmd)...) } @@ -1848,6 +1848,7 @@ func (op Operation) publishFinishedEvent(ctx context.Context, info finishedInfor DriverConnectionID: info.driverConnectionID, Message: logger.CommandSucceeded, Name: info.cmdName, + DatabaseName: op.Database, RequestID: int64(info.requestID), ServerConnectionID: info.serverConnID, ServerHost: host, @@ -1870,6 +1871,7 @@ func (op Operation) publishFinishedEvent(ctx context.Context, info finishedInfor DriverConnectionID: info.driverConnectionID, Message: logger.CommandFailed, Name: info.cmdName, + DatabaseName: op.Database, RequestID: int64(info.requestID), ServerConnectionID: info.serverConnID, ServerHost: host, @@ -1887,6 +1889,7 @@ func (op Operation) publishFinishedEvent(ctx context.Context, info finishedInfor finished := event.CommandFinishedEvent{ CommandName: info.cmdName, + DatabaseName: op.Database, RequestID: int64(info.requestID), ConnectionID: info.connID, Duration: info.duration, From 4567eabd8d237894702bd3f510d2b855c8b51e53 Mon Sep 17 00:00:00 2001 From: Qingyang Hu <103950869+qingyang-hu@users.noreply.github.com> Date: Fri, 21 Jul 2023 18:16:11 -0400 Subject: [PATCH 15/15] GODRIVER-2851 Fix failing "TestCMAPProse" test. 
(#1316) --- x/mongo/driver/topology/cmap_prose_test.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/x/mongo/driver/topology/cmap_prose_test.go b/x/mongo/driver/topology/cmap_prose_test.go index 1187a31f7f..53c6e9ba36 100644 --- a/x/mongo/driver/topology/cmap_prose_test.go +++ b/x/mongo/driver/topology/cmap_prose_test.go @@ -106,9 +106,6 @@ func TestCMAPProse(t *testing.T) { }) t.Run("checkOut", func(t *testing.T) { t.Run("connection error publishes events", func(t *testing.T) { - // TODO(GODRIVER-2851): Fix and unskip this test case. - t.Skip("Test fails frequently, skipping. See GODRIVER-2851") - // If checkOut() creates a connection that encounters an error while connecting, // the pool should publish connection created and closed events and checkOut should // return the error. @@ -131,8 +128,7 @@ func TestCMAPProse(t *testing.T) { _, err := pool.checkOut(context.Background()) assert.NotNil(t, err, "expected checkOut() error, got nil") - assert.Equal(t, 1, len(created), "expected 1 opened events, got %d", len(created)) - assert.Equal(t, 1, len(closed), "expected 1 closed events, got %d", len(closed)) + assertConnectionCounts(t, pool, 1, 1) }) t.Run("pool is empty", func(t *testing.T) { // If a checkOut() has to create a new connection and that connection encounters an