diff --git a/x/mongo/driver/topology/connection.go b/x/mongo/driver/topology/connection.go
index 34e04be1b6..7a1a35b3b7 100644
--- a/x/mongo/driver/topology/connection.go
+++ b/x/mongo/driver/topology/connection.go
@@ -9,6 +9,7 @@ package topology
 import (
 	"context"
 	"crypto/tls"
+	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
@@ -80,9 +81,9 @@ type connection struct {
 	// accessTokens in the OIDC authenticator cache.
 	oidcTokenGenID uint64
 
-	// awaitingResponse indicates that the server response was not completely
+	// awaitRemainingBytes indicates the size of server response that was not completely
 	// read before returning the connection to the pool.
-	awaitingResponse bool
+	awaitRemainingBytes *int32
 }
 
 // newConnection handles the creation of a connection. It does not connect the connection.
@@ -111,11 +112,6 @@ func newConnection(addr address.Address, opts ...ConnectionOption) *connection {
 	return c
 }
 
-// DriverConnectionID returns the driver connection ID.
-func (c *connection) DriverConnectionID() int64 {
-	return c.driverConnectionID
-}
-
 // setGenerationNumber sets the connection's generation number if a callback has been provided to do so in connection
 // configuration.
 func (c *connection) setGenerationNumber() {
@@ -137,6 +133,39 @@ func (c *connection) hasGenerationNumber() bool {
 	return driverutil.IsServerLoadBalanced(c.desc)
 }
 
+func configureTLS(ctx context.Context,
+	tlsConnSource tlsConnectionSource,
+	nc net.Conn,
+	addr address.Address,
+	config *tls.Config,
+	ocspOpts *ocsp.VerifyOptions,
+) (net.Conn, error) {
+	// Ensure config.ServerName is always set for SNI.
+	if config.ServerName == "" {
+		hostname := addr.String()
+		colonPos := strings.LastIndex(hostname, ":")
+		if colonPos == -1 {
+			colonPos = len(hostname)
+		}
+
+		hostname = hostname[:colonPos]
+		config.ServerName = hostname
+	}
+
+	client := tlsConnSource.Client(nc, config)
+	if err := clientHandshake(ctx, client); err != nil {
+		return nil, err
+	}
+
+	// Only do OCSP verification if TLS verification is requested.
+	if !config.InsecureSkipVerify {
+		if ocspErr := ocsp.Verify(ctx, client.ConnectionState(), ocspOpts); ocspErr != nil {
+			return nil, ocspErr
+		}
+	}
+	return client, nil
+}
+
 // connect handles the I/O for a connection. It will dial, configure TLS, and perform initialization
 // handshakes. All errors returned by connect are considered "before the handshake completes" and
 // must be handled by calling the appropriate SDAM handshake error handler.
@@ -291,6 +320,10 @@ func (c *connection) closeConnectContext() {
 	}
 }
 
+func (c *connection) cancellationListenerCallback() {
+	_ = c.close()
+}
+
 func transformNetworkError(ctx context.Context, originalError error, contextDeadlineUsed bool) error {
 	if originalError == nil {
 		return nil
@@ -313,10 +346,6 @@ func transformNetworkError(ctx context.Context, originalError error, contextDead
 	return originalError
 }
 
-func (c *connection) cancellationListenerCallback() {
-	_ = c.close()
-}
-
 func (c *connection) writeWireMessage(ctx context.Context, wm []byte) error {
 	var err error
 	if atomic.LoadInt64(&c.state) != connConnected {
@@ -377,14 +406,9 @@ func (c *connection) readWireMessage(ctx context.Context) ([]byte, error) {
 
 	dst, errMsg, err := c.read(ctx)
 	if err != nil {
-		if nerr := net.Error(nil); errors.As(err, &nerr) && nerr.Timeout() {
-			// If the error was a timeout error, instead of closing the
-			// connection mark it as awaiting response so the pool can read the
-			// response before making it available to other operations.
-			c.awaitingResponse = true
-		} else {
-			// Otherwise, and close the connection because we don't know what
-			// the connection state is.
+		if c.awaitRemainingBytes == nil {
+			// If the connection was not marked as awaiting response, close the
+			// connection because we don't know what the connection state is.
 			c.close()
 		}
 		message := errMsg
@@ -401,6 +425,26 @@ func (c *connection) readWireMessage(ctx context.Context) ([]byte, error) {
 	return dst, nil
 }
 
+func (c *connection) parseWmSizeBytes(wmSizeBytes [4]byte) (int32, error) {
+	// read the length as an int32
+	size := int32(binary.LittleEndian.Uint32(wmSizeBytes[:]))
+
+	if size < 4 {
+		return 0, fmt.Errorf("malformed message length: %d", size)
+	}
+	// In the case of a hello response where MaxMessageSize has not yet been set, use the hard-coded
+	// defaultMaxMessageSize instead.
+	maxMessageSize := c.desc.MaxMessageSize
+	if maxMessageSize == 0 {
+		maxMessageSize = defaultMaxMessageSize
+	}
+	if uint32(size) > maxMessageSize {
+		return 0, errResponseTooLarge
+	}
+
+	return size, nil
+}
+
 func (c *connection) read(ctx context.Context) (bytesRead []byte, errMsg string, err error) {
 	go c.cancellationListener.Listen(ctx, c.cancellationListenerCallback)
 	defer func() {
@@ -414,6 +458,14 @@ func (c *connection) read(ctx context.Context) (bytesRead []byte, errMsg string,
 		}
 	}()
 
+	isCSOTTimeout := func(err error) bool {
+		// If the error was a timeout error, instead of closing the
+		// connection mark it as awaiting response so the pool can read the
+		// response before making it available to other operations.
+		nerr := net.Error(nil)
+		return errors.As(err, &nerr) && nerr.Timeout()
+	}
+
 	// We use an array here because it only costs 4 bytes on the stack and means we'll only need to
 	// reslice dst once instead of twice.
 	var sizeBuf [4]byte
 
@@ -421,29 +473,27 @@ func (c *connection) read(ctx context.Context) (bytesRead []byte, errMsg string,
 	// We do a ReadFull into an array here instead of doing an opportunistic ReadAtLeast into dst
 	// because there might be more than one wire message waiting to be read, for example when
 	// reading messages from an exhaust cursor.
-	_, err = io.ReadFull(c.nc, sizeBuf[:])
+	n, err := io.ReadFull(c.nc, sizeBuf[:])
 	if err != nil {
+		if l := int32(n); l == 0 && isCSOTTimeout(err) {
+			c.awaitRemainingBytes = &l
+		}
 		return nil, "incomplete read of message header", err
 	}
-
-	// read the length as an int32
-	size := (int32(sizeBuf[0])) | (int32(sizeBuf[1]) << 8) | (int32(sizeBuf[2]) << 16) | (int32(sizeBuf[3]) << 24)
-
-	// In the case of a hello response where MaxMessageSize has not yet been set, use the hard-coded
-	// defaultMaxMessageSize instead.
-	maxMessageSize := c.desc.MaxMessageSize
-	if maxMessageSize == 0 {
-		maxMessageSize = defaultMaxMessageSize
-	}
-	if uint32(size) > maxMessageSize {
-		return nil, errResponseTooLarge.Error(), errResponseTooLarge
+	size, err := c.parseWmSizeBytes(sizeBuf)
+	if err != nil {
+		return nil, err.Error(), err
 	}
 
 	dst := make([]byte, size)
 	copy(dst, sizeBuf[:])
 
-	_, err = io.ReadFull(c.nc, dst[4:])
+	n, err = io.ReadFull(c.nc, dst[4:])
 	if err != nil {
+		remainingBytes := size - 4 - int32(n)
+		if remainingBytes > 0 && isCSOTTimeout(err) {
+			c.awaitRemainingBytes = &remainingBytes
+		}
 		return dst, "incomplete read of full message", err
 	}
 
@@ -496,10 +546,6 @@ func (c *connection) setCanStream(canStream bool) {
 	c.canStream = canStream
 }
 
-func (c initConnection) supportsStreaming() bool {
-	return c.canStream
-}
-
 func (c *connection) setStreaming(streaming bool) {
 	c.currentlyStreaming = streaming
 }
@@ -508,6 +554,14 @@ func (c *connection) getCurrentlyStreaming() bool {
 	return c.currentlyStreaming
 }
 
+func (c *connection) previousCanceled() bool {
+	if val := c.prevCanceled.Load(); val != nil {
+		return val.(bool)
+	}
+
+	return false
+}
+
 func (c *connection) ID() string {
 	return c.id
 }
@@ -516,12 +570,17 @@ func (c *connection) ServerConnectionID() *int64 {
 	return c.serverConnectionID
 }
 
-func (c *connection) previousCanceled() bool {
-	if val := c.prevCanceled.Load(); val != nil {
-		return val.(bool)
-	}
+// DriverConnectionID returns the driver connection ID.
+func (c *connection) DriverConnectionID() int64 {
+	return c.driverConnectionID
+}
 
-	return false
+func (c *connection) OIDCTokenGenID() uint64 {
+	return c.oidcTokenGenID
+}
+
+func (c *connection) SetOIDCTokenGenID(genID uint64) {
+	c.oidcTokenGenID = genID
 }
 
 // initConnection is an adapter used during connection initialization. It has the minimum
@@ -562,7 +621,7 @@ func (c initConnection) CurrentlyStreaming() bool {
 	return c.getCurrentlyStreaming()
 }
 func (c initConnection) SupportsStreaming() bool {
-	return c.supportsStreaming()
+	return c.canStream
 }
 
 // Connection implements the driver.Connection interface to allow reading and writing wire
@@ -797,39 +856,6 @@ func (c *Connection) DriverConnectionID() int64 {
 	return c.connection.DriverConnectionID()
 }
 
-func configureTLS(ctx context.Context,
-	tlsConnSource tlsConnectionSource,
-	nc net.Conn,
-	addr address.Address,
-	config *tls.Config,
-	ocspOpts *ocsp.VerifyOptions,
-) (net.Conn, error) {
-	// Ensure config.ServerName is always set for SNI.
-	if config.ServerName == "" {
-		hostname := addr.String()
-		colonPos := strings.LastIndex(hostname, ":")
-		if colonPos == -1 {
-			colonPos = len(hostname)
-		}
-
-		hostname = hostname[:colonPos]
-		config.ServerName = hostname
-	}
-
-	client := tlsConnSource.Client(nc, config)
-	if err := clientHandshake(ctx, client); err != nil {
-		return nil, err
-	}
-
-	// Only do OCSP verification if TLS verification is requested.
-	if !config.InsecureSkipVerify {
-		if ocspErr := ocsp.Verify(ctx, client.ConnectionState(), ocspOpts); ocspErr != nil {
-			return nil, ocspErr
-		}
-	}
-	return client, nil
-}
-
 // OIDCTokenGenID returns the OIDC token generation ID.
 func (c *Connection) OIDCTokenGenID() uint64 {
 	return c.oidcTokenGenID
@@ -839,11 +865,3 @@ func (c *Connection) OIDCTokenGenID() uint64 {
 // SetOIDCTokenGenID sets the OIDC token generation ID.
 func (c *Connection) SetOIDCTokenGenID(genID uint64) {
 	c.oidcTokenGenID = genID
 }
-
-func (c *connection) OIDCTokenGenID() uint64 {
-	return c.oidcTokenGenID
-}
-
-func (c *connection) SetOIDCTokenGenID(genID uint64) {
-	c.oidcTokenGenID = genID
-}
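The connection.go changes above make a CSOT timeout record how much of the server reply is still unread (awaitRemainingBytes) instead of closing the connection. A minimal sketch of that bookkeeping, assuming a plain net.Conn and illustrative names (not part of the patch):

package example

import (
	"errors"
	"io"
	"net"
)

// readBody reads the remainder of a wire message whose first 4 length bytes
// were already consumed. On a timeout it reports how many bytes the server
// still owes, so the caller can record them and have the pool drain them
// later instead of closing the connection.
func readBody(nc net.Conn, dst []byte, size int32) (remaining int32, err error) {
	n, err := io.ReadFull(nc, dst[4:])
	if err != nil {
		var nerr net.Error
		remaining = size - 4 - int32(n)
		if remaining > 0 && errors.As(err, &nerr) && nerr.Timeout() {
			return remaining, err // timeout: leave the connection open, drain later
		}
		return 0, err // anything else: the connection state is unknown
	}
	return 0, nil
}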
diff --git a/x/mongo/driver/topology/connection_test.go b/x/mongo/driver/topology/connection_test.go
index 5b7dac4537..88f14fc533 100644
--- a/x/mongo/driver/topology/connection_test.go
+++ b/x/mongo/driver/topology/connection_test.go
@@ -393,6 +393,23 @@ func TestConnection(t *testing.T) {
 				}
 				listener.assertCalledOnce(t)
 			})
+			t.Run("size too small errors", func(t *testing.T) {
+				err := errors.New("malformed message length: 3")
+				tnc := &testNetConn{readerr: err, buf: []byte{0x03, 0x00, 0x00, 0x00}}
+				conn := &connection{id: "foobar", nc: tnc, state: connConnected}
+				listener := newTestCancellationListener(false)
+				conn.cancellationListener = listener
+
+				want := ConnectionError{ConnectionID: "foobar", Wrapped: err, message: err.Error()}
+				_, got := conn.readWireMessage(context.Background())
+				if !cmp.Equal(got, want, cmp.Comparer(compareErrors)) {
+					t.Errorf("errors do not match. got %v; want %v", got, want)
+				}
+				if !tnc.closed {
+					t.Errorf("failed to closeConnection net.Conn after error writing bytes.")
+				}
+				listener.assertCalledOnce(t)
+			})
 			t.Run("full message read errors", func(t *testing.T) {
 				err := errors.New("Read error")
 				tnc := &testNetConn{readerr: err, buf: []byte{0x11, 0x00, 0x00, 0x00}}
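The new "size too small errors" subtest exercises the malformed-length check that parseWmSizeBytes adds. For reference, a standalone sketch of the little-endian length-prefix decoding it targets (illustrative helper name; the cap is passed in as a parameter because the real default limit is not restated here):

package example

import (
	"encoding/binary"
	"fmt"
)

// parseSize decodes the 4-byte little-endian length prefix of a wire message.
// The length includes the prefix itself, so values below 4 are malformed, and
// anything above maxMessageSize is rejected.
func parseSize(header [4]byte, maxMessageSize uint32) (int32, error) {
	size := int32(binary.LittleEndian.Uint32(header[:]))
	if size < 4 {
		return 0, fmt.Errorf("malformed message length: %d", size)
	}
	if uint32(size) > maxMessageSize {
		return 0, fmt.Errorf("message of %d bytes exceeds limit of %d", size, maxMessageSize)
	}
	return size, nil
}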
diff --git a/x/mongo/driver/topology/pool.go b/x/mongo/driver/topology/pool.go
index e9a9e8ef20..499d2854dd 100644
--- a/x/mongo/driver/topology/pool.go
+++ b/x/mongo/driver/topology/pool.go
@@ -9,6 +9,7 @@ package topology
 import (
 	"context"
 	"fmt"
+	"io"
 	"net"
 	"sync"
 	"sync/atomic"
@@ -790,17 +791,27 @@ var (
 //
 // It calls the package-global BGReadCallback function, if set, with the
 // address, timings, and any errors that occurred.
-func bgRead(pool *pool, conn *connection) {
-	var start, read time.Time
-	start = time.Now()
-	errs := make([]error, 0)
-	connClosed := false
+func bgRead(pool *pool, conn *connection, size int32) {
+	var err error
+	start := time.Now()
 
 	defer func() {
+		read := time.Now()
+		errs := make([]error, 0)
+		connClosed := false
+		if err != nil {
+			errs = append(errs, err)
+			connClosed = true
+			err = conn.close()
+			if err != nil {
+				errs = append(errs, fmt.Errorf("error closing conn after reading: %w", err))
+			}
+		}
+
 		// No matter what happens, always check the connection back into the
 		// pool, which will either make it available for other operations or
 		// remove it from the pool if it was closed.
-		err := pool.checkInNoEvent(conn)
+		err = pool.checkInNoEvent(conn)
 		if err != nil {
 			errs = append(errs, fmt.Errorf("error checking in: %w", err))
 		}
@@ -810,34 +821,28 @@ func bgRead(pool *pool, conn *connection, size int32) {
 		}
 	}()
 
-	err := conn.nc.SetReadDeadline(time.Now().Add(BGReadTimeout))
+	err = conn.nc.SetReadDeadline(time.Now().Add(BGReadTimeout))
 	if err != nil {
-		errs = append(errs, fmt.Errorf("error setting a read deadline: %w", err))
-
-		connClosed = true
-		err := conn.close()
-		if err != nil {
-			errs = append(errs, fmt.Errorf("error closing conn after setting read deadline: %w", err))
-		}
-
+		err = fmt.Errorf("error setting a read deadline: %w", err)
 		return
 	}
 
-	// The context here is only used for cancellation, not deadline timeout, so
-	// use context.Background(). The read timeout is set by calling
-	// SetReadDeadline above.
-	_, _, err = conn.read(context.Background())
-	read = time.Now()
-	if err != nil {
-		errs = append(errs, fmt.Errorf("error reading: %w", err))
-
-		connClosed = true
-		err := conn.close()
+	if size == 0 {
+		var sizeBuf [4]byte
+		_, err = io.ReadFull(conn.nc, sizeBuf[:])
 		if err != nil {
-			errs = append(errs, fmt.Errorf("error closing conn after reading: %w", err))
+			err = fmt.Errorf("error reading the message size: %w", err)
+			return
 		}
-
-		return
+		size, err = conn.parseWmSizeBytes(sizeBuf)
+		if err != nil {
+			return
+		}
+		size -= 4
+	}
+	_, err = io.CopyN(io.Discard, conn.nc, int64(size))
+	if err != nil {
+		err = fmt.Errorf("error discarding %d byte message: %w", size, err)
 	}
 }
 
@@ -888,9 +893,10 @@ func (p *pool) checkInNoEvent(conn *connection) error {
 	// means that connections in "awaiting response" state are checked in but
 	// not usable, which is not covered by the current pool events. We may need
 	// to add pool event information in the future to communicate that.
-	if conn.awaitingResponse {
-		conn.awaitingResponse = false
-		go bgRead(p, conn)
+	if conn.awaitRemainingBytes != nil {
+		size := *conn.awaitRemainingBytes
+		conn.awaitRemainingBytes = nil
+		go bgRead(p, conn, size)
 		return nil
 	}
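The new bgRead above drains whatever is left of a timed-out reply before the connection is reused: it sets a short read deadline, reads the 4-byte length prefix if the operation never got that far, then discards the remaining bytes. A minimal, self-contained sketch of that drain logic, assuming a plain net.Conn and an illustrative helper name and deadline (not part of the patch):

package example

import (
	"encoding/binary"
	"fmt"
	"io"
	"net"
	"time"
)

// drainRemaining discards the rest of a timed-out server reply so the
// connection can be reused. remaining == 0 means the 4-byte length prefix was
// never read, so read it first and discard the rest of that message.
func drainRemaining(nc net.Conn, remaining int32) error {
	if err := nc.SetReadDeadline(time.Now().Add(time.Second)); err != nil {
		return fmt.Errorf("error setting a read deadline: %w", err)
	}
	if remaining == 0 {
		var sizeBuf [4]byte
		if _, err := io.ReadFull(nc, sizeBuf[:]); err != nil {
			return fmt.Errorf("error reading the message size: %w", err)
		}
		remaining = int32(binary.LittleEndian.Uint32(sizeBuf[:])) - 4
	}
	_, err := io.CopyN(io.Discard, nc, int64(remaining))
	return err
}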
diff --git a/x/mongo/driver/topology/pool_test.go b/x/mongo/driver/topology/pool_test.go
index e8ecbe1476..f5e3ddfd6d 100644
--- a/x/mongo/driver/topology/pool_test.go
+++ b/x/mongo/driver/topology/pool_test.go
@@ -10,12 +10,14 @@ import (
 	"context"
 	"errors"
 	"net"
+	"regexp"
 	"sync"
 	"testing"
 	"time"
 
 	"go.mongodb.org/mongo-driver/v2/event"
 	"go.mongodb.org/mongo-driver/v2/internal/assert"
+	"go.mongodb.org/mongo-driver/v2/internal/csot"
 	"go.mongodb.org/mongo-driver/v2/internal/eventtest"
 	"go.mongodb.org/mongo-driver/v2/internal/require"
 	"go.mongodb.org/mongo-driver/v2/mongo/address"
@@ -1154,6 +1156,310 @@ func TestPool(t *testing.T) {
 	})
 }
 
+func TestBackgroundRead(t *testing.T) {
+	t.Parallel()
+
+	newBGReadCallback := func(errsCh chan []error) func(string, time.Time, time.Time, []error, bool) {
+		return func(_ string, _, _ time.Time, errs []error, _ bool) {
+			errsCh <- errs
+			close(errsCh)
+		}
+	}
+
+	t.Run("incomplete read of message header", func(t *testing.T) {
+		errsCh := make(chan []error)
+		var originalCallback func(string, time.Time, time.Time, []error, bool)
+		originalCallback, BGReadCallback = BGReadCallback, newBGReadCallback(errsCh)
+		t.Cleanup(func() {
+			BGReadCallback = originalCallback
+		})
+
+		timeout := 10 * time.Millisecond
+
+		cleanup := make(chan struct{})
+		defer close(cleanup)
+		addr := bootstrapConnections(t, 1, func(nc net.Conn) {
+			defer func() {
+				<-cleanup
+				_ = nc.Close()
+			}()
+
+			_, err := nc.Write([]byte{10, 0, 0})
+			noerr(t, err)
+		})
+
+		p := newPool(
+			poolConfig{Address: address.Address(addr.String())},
+		)
+		defer p.close(context.Background())
+		err := p.ready()
+		noerr(t, err)
+
+		conn, err := p.checkOut(context.Background())
+		noerr(t, err)
+		ctx, cancel := csot.WithTimeout(context.Background(), &timeout)
+		defer cancel()
+		_, err = conn.readWireMessage(ctx)
+		regex := regexp.MustCompile(
+			`^connection\(.*\[-\d+\]\) incomplete read of message header: context deadline exceeded: read tcp 127.0.0.1:.*->127.0.0.1:.*: i\/o timeout$`,
+		)
+		assert.True(t, regex.MatchString(err.Error()), "error %q does not match pattern %q", err, regex)
+		assert.Nil(t, conn.awaitRemainingBytes, "conn.awaitRemainingBytes should be nil")
+		close(errsCh) // this line causes a double close if BGReadCallback is ever called.
+	})
+	t.Run("timeout reading message header, successful background read", func(t *testing.T) {
+		errsCh := make(chan []error)
+		var originalCallback func(string, time.Time, time.Time, []error, bool)
+		originalCallback, BGReadCallback = BGReadCallback, newBGReadCallback(errsCh)
+		t.Cleanup(func() {
+			BGReadCallback = originalCallback
+		})
+
+		timeout := 10 * time.Millisecond
+
+		addr := bootstrapConnections(t, 1, func(nc net.Conn) {
+			defer func() {
+				_ = nc.Close()
+			}()
+
+			// Wait until the operation times out, then write a full message.
+			time.Sleep(timeout * 2)
+			_, err := nc.Write([]byte{10, 0, 0, 0, 0, 0, 0, 0, 0, 0})
+			noerr(t, err)
+		})
+
+		p := newPool(
+			poolConfig{Address: address.Address(addr.String())},
+		)
+		defer p.close(context.Background())
+		err := p.ready()
+		noerr(t, err)
+
+		conn, err := p.checkOut(context.Background())
+		noerr(t, err)
+		ctx, cancel := csot.WithTimeout(context.Background(), &timeout)
+		defer cancel()
+		_, err = conn.readWireMessage(ctx)
+		regex := regexp.MustCompile(
+			`^connection\(.*\[-\d+\]\) incomplete read of message header: context deadline exceeded: read tcp 127.0.0.1:.*->127.0.0.1:.*: i\/o timeout$`,
+		)
+		assert.True(t, regex.MatchString(err.Error()), "error %q does not match pattern %q", err, regex)
+		err = p.checkIn(conn)
+		noerr(t, err)
+		var bgErrs []error
+		select {
+		case bgErrs = <-errsCh:
+		case <-time.After(3 * time.Second):
+			assert.Fail(t, "did not receive expected error after waiting for 3 seconds")
+		}
+		require.Len(t, bgErrs, 0, "expected no error from bgRead()")
+	})
+	t.Run("timeout reading message header, incomplete head during background read", func(t *testing.T) {
+		errsCh := make(chan []error)
+		var originalCallback func(string, time.Time, time.Time, []error, bool)
+		originalCallback, BGReadCallback = BGReadCallback, newBGReadCallback(errsCh)
+		t.Cleanup(func() {
+			BGReadCallback = originalCallback
+		})
+
+		timeout := 10 * time.Millisecond
+
+		addr := bootstrapConnections(t, 1, func(nc net.Conn) {
+			defer func() {
+				_ = nc.Close()
+			}()
+
+			// Wait until the operation times out, then write an incomplete head.
+			time.Sleep(timeout * 2)
+			_, err := nc.Write([]byte{10, 0, 0})
+			noerr(t, err)
+		})
+
+		p := newPool(
+			poolConfig{Address: address.Address(addr.String())},
+		)
+		defer p.close(context.Background())
+		err := p.ready()
+		noerr(t, err)
+
+		conn, err := p.checkOut(context.Background())
+		noerr(t, err)
+		ctx, cancel := csot.WithTimeout(context.Background(), &timeout)
+		defer cancel()
+		_, err = conn.readWireMessage(ctx)
+		regex := regexp.MustCompile(
+			`^connection\(.*\[-\d+\]\) incomplete read of message header: context deadline exceeded: read tcp 127.0.0.1:.*->127.0.0.1:.*: i\/o timeout$`,
+		)
+		assert.True(t, regex.MatchString(err.Error()), "error %q does not match pattern %q", err, regex)
+		err = p.checkIn(conn)
+		noerr(t, err)
+		var bgErrs []error
+		select {
+		case bgErrs = <-errsCh:
+		case <-time.After(3 * time.Second):
+			assert.Fail(t, "did not receive expected error after waiting for 3 seconds")
+		}
+		require.Len(t, bgErrs, 1, "expected 1 error from bgRead()")
+		assert.EqualError(t, bgErrs[0], "error reading the message size: unexpected EOF")
+	})
+	t.Run("timeout reading message header, background read timeout", func(t *testing.T) {
+		errsCh := make(chan []error)
+		var originalCallback func(string, time.Time, time.Time, []error, bool)
+		originalCallback, BGReadCallback = BGReadCallback, newBGReadCallback(errsCh)
+		t.Cleanup(func() {
+			BGReadCallback = originalCallback
+		})
+
+		timeout := 10 * time.Millisecond
+
+		cleanup := make(chan struct{})
+		defer close(cleanup)
+		addr := bootstrapConnections(t, 1, func(nc net.Conn) {
+			defer func() {
+				<-cleanup
+				_ = nc.Close()
+			}()
+
+			// Wait until the operation times out, then write an incomplete
+			// message.
+			time.Sleep(timeout * 2)
+			_, err := nc.Write([]byte{10, 0, 0, 0, 0, 0, 0, 0})
+			noerr(t, err)
+		})
+
+		p := newPool(
+			poolConfig{Address: address.Address(addr.String())},
+		)
+		defer p.close(context.Background())
+		err := p.ready()
+		noerr(t, err)
+
+		conn, err := p.checkOut(context.Background())
+		noerr(t, err)
+		ctx, cancel := csot.WithTimeout(context.Background(), &timeout)
+		defer cancel()
+		_, err = conn.readWireMessage(ctx)
+		regex := regexp.MustCompile(
+			`^connection\(.*\[-\d+\]\) incomplete read of message header: context deadline exceeded: read tcp 127.0.0.1:.*->127.0.0.1:.*: i\/o timeout$`,
+		)
+		assert.True(t, regex.MatchString(err.Error()), "error %q does not match pattern %q", err, regex)
+		err = p.checkIn(conn)
+		noerr(t, err)
+		var bgErrs []error
+		select {
+		case bgErrs = <-errsCh:
+		case <-time.After(3 * time.Second):
+			assert.Fail(t, "did not receive expected error after waiting for 3 seconds")
+		}
+		require.Len(t, bgErrs, 1, "expected 1 error from bgRead()")
+		wantErr := regexp.MustCompile(
+			`^error discarding 6 byte message: read tcp 127.0.0.1:.*->127.0.0.1:.*: i\/o timeout$`,
+		)
+		assert.True(t, wantErr.MatchString(bgErrs[0].Error()), "error %q does not match pattern %q", bgErrs[0], wantErr)
+	})
+	t.Run("timeout reading full message, successful background read", func(t *testing.T) {
+		errsCh := make(chan []error)
+		var originalCallback func(string, time.Time, time.Time, []error, bool)
+		originalCallback, BGReadCallback = BGReadCallback, newBGReadCallback(errsCh)
+		t.Cleanup(func() {
+			BGReadCallback = originalCallback
+		})
+
+		timeout := 10 * time.Millisecond
+
+		addr := bootstrapConnections(t, 1, func(nc net.Conn) {
+			defer func() {
+				_ = nc.Close()
+			}()
+
+			var err error
+			_, err = nc.Write([]byte{12, 0, 0, 0, 0, 0, 0, 0, 1})
+			noerr(t, err)
+			time.Sleep(timeout * 2)
+			// write a complete message
+			_, err = nc.Write([]byte{2, 3, 4})
+			noerr(t, err)
+		})
+
+		p := newPool(
+			poolConfig{Address: address.Address(addr.String())},
+		)
+		defer p.close(context.Background())
+		err := p.ready()
+		noerr(t, err)
+
+		conn, err := p.checkOut(context.Background())
+		noerr(t, err)
+		ctx, cancel := csot.WithTimeout(context.Background(), &timeout)
+		defer cancel()
+		_, err = conn.readWireMessage(ctx)
+		regex := regexp.MustCompile(
+			`^connection\(.*\[-\d+\]\) incomplete read of full message: context deadline exceeded: read tcp 127.0.0.1:.*->127.0.0.1:.*: i\/o timeout$`,
+		)
+		assert.True(t, regex.MatchString(err.Error()), "error %q does not match pattern %q", err, regex)
+		err = p.checkIn(conn)
+		noerr(t, err)
+		var bgErrs []error
+		select {
+		case bgErrs = <-errsCh:
+		case <-time.After(3 * time.Second):
+			assert.Fail(t, "did not receive expected error after waiting for 3 seconds")
+		}
+		require.Len(t, bgErrs, 0, "expected no error from bgRead()")
+	})
+	t.Run("timeout reading full message, background read EOF", func(t *testing.T) {
+		errsCh := make(chan []error)
+		var originalCallback func(string, time.Time, time.Time, []error, bool)
+		originalCallback, BGReadCallback = BGReadCallback, newBGReadCallback(errsCh)
+		t.Cleanup(func() {
+			BGReadCallback = originalCallback
+		})
+
+		timeout := 10 * time.Millisecond
+
+		addr := bootstrapConnections(t, 1, func(nc net.Conn) {
+			defer func() {
+				_ = nc.Close()
+			}()
+
+			var err error
+			_, err = nc.Write([]byte{12, 0, 0, 0, 0, 0, 0, 0, 1})
+			noerr(t, err)
+			time.Sleep(timeout * 2)
+			// write an incomplete message
+			_, err = nc.Write([]byte{2})
+			noerr(t, err)
+		})
+
+		p := newPool(
+			poolConfig{Address: address.Address(addr.String())},
+		)
+		defer p.close(context.Background())
+		err := p.ready()
+		noerr(t, err)
+
+		conn, err := p.checkOut(context.Background())
+		noerr(t, err)
+		ctx, cancel := csot.WithTimeout(context.Background(), &timeout)
+		defer cancel()
+		_, err = conn.readWireMessage(ctx)
+		regex := regexp.MustCompile(
+			`^connection\(.*\[-\d+\]\) incomplete read of full message: context deadline exceeded: read tcp 127.0.0.1:.*->127.0.0.1:.*: i\/o timeout$`,
+		)
+		assert.True(t, regex.MatchString(err.Error()), "error %q does not match pattern %q", err, regex)
+		err = p.checkIn(conn)
+		noerr(t, err)
+		var bgErrs []error
+		select {
+		case bgErrs = <-errsCh:
+		case <-time.After(3 * time.Second):
+			assert.Fail(t, "did not receive expected error after waiting for 3 seconds")
+		}
+		require.Len(t, bgErrs, 1, "expected 1 error from bgRead()")
+		assert.EqualError(t, bgErrs[0], "error discarding 3 byte message: EOF")
+	})
+}
+
 func assertConnectionsClosed(t *testing.T, dialer *dialer, count int) {
 	t.Helper()