GODRIVER-2904 Add environment log. #1373

Merged · 7 commits · Sep 12, 2023
Changes from 4 commits
1 change: 1 addition & 0 deletions x/mongo/driver/topology/polling_srv_records_test.go
@@ -105,6 +105,7 @@ func (ss serverSorter) Less(i, j int) bool {
}

func compareHosts(t *testing.T, received []description.Server, expected []string) {
t.Helper()
if len(received) != len(expected) {
t.Fatalf("Number of hosts in topology does not match expected value. Got %v; want %v.", len(received), len(expected))
}
75 changes: 70 additions & 5 deletions x/mongo/driver/topology/topology.go
@@ -69,6 +69,63 @@ const (
SingleMode
)

type hostEnv int

const (
genuine hostEnv = iota
cosmosDB
documentDB
)

var (
cosmosDBLog = `You appear to be connected to a CosmosDB cluster. For more information regarding feature compatibility and support please visit https://www.mongodb.com/supportability/cosmosdb`
documentDBLog = `You appear to be connected to a DocumentDB cluster. For more information regarding feature compatibility and support please visit https://www.mongodb.com/supportability/documentdb`
Review comment (Collaborator):
Suggest making cosmosDBLog and documentDBLog constants. See comments on restructuring.
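A minimal sketch of that suggestion, with the two messages as untyped string constants (text copied from the diff above). Note this only works together with the restructuring suggested in later comments: the current code takes the address of these variables (&cosmosDBLog), which is not possible with constants.

const (
	// Untyped string constants; raw string literals are legal here.
	cosmosDBLog   = `You appear to be connected to a CosmosDB cluster. For more information regarding feature compatibility and support please visit https://www.mongodb.com/supportability/cosmosdb`
	documentDBLog = `You appear to be connected to a DocumentDB cluster. For more information regarding feature compatibility and support please visit https://www.mongodb.com/supportability/documentdb`
)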

)

type envMap struct {
host string
env hostEnv
}

type hostLogger struct {
logger *logger.Logger
envs []envMap
logs map[hostEnv]*string
}

func newHostLogger(l *logger.Logger) *hostLogger {
return &hostLogger{
logger: l,
envs: []envMap{
{".cosmos.azure.com", cosmosDB},
{".docdb.amazonaws.com", doumentDB},
{".docdb-elastic.amazonaws.com", doumentDB},
},
logs: map[hostEnv]*string{
cosmosDB: &cosmosDBLog,
doumentDB: &documentDBLog,
},
}
}

func (l *hostLogger) log(host string) {
Review comment from @prestonvasquez (Collaborator), Sep 6, 2023:
I don't think we should have a struct for logging. IIUC the only reason we do this is to maintain state so as not to repeat logs for the same host. However, it might be best to maintain this state in the code that is calling the logger, to minimize any leaking:

			hostSet := map[string]bool{}
			for _, host := range parsedHosts {
				if h, _, err := net.SplitHostPort(host); err == nil {
					host = h
				}

				if !hostSet[host] {
					logTopologyThirdPartyUsage(t, host)
				}

				hostSet[host] = true
			}

Review comment from @prestonvasquez (Collaborator), Sep 6, 2023:
Whether we decide to use a struct or not, we should reorganize the code to match the existing "logTopology*" pattern:

func logTopologyThirdPartyUsage(topo *Topology, host string) {
	thirdPartyMessages := map[string]string{
		".cosmos.azure.com":            cosmosDBLog,
		".docdb.amazonaws.com":         documentDBLog,
		".docdb-elastic.amazonaws.com": documentDBLog,
	}

	for tphost, msg := range thirdPartyMessages {
		if !strings.HasSuffix(host, tphost) {
			continue
		}

		// Serialize the topology message to be consistent with other topology
		// messages.
		topo.cfg.logger.Print(logger.LevelInfo,
			logger.ComponentTopology,
			msg,
			logger.SerializeTopology(logger.Topology{ID: topo.id, Message: msg})...)
	}
}

if h, _, err := net.SplitHostPort(host); err == nil {
host = h
}
for _, em := range l.envs {
if strings.HasSuffix(host, em.host) {
if log, ok := l.logs[em.env]; ok && log != nil {
l.logger.Print(logger.LevelInfo,
logger.ComponentTopology,
*log,
)
l.logs[em.env] = nil
}
break
}
}
}

// Topology represents a MongoDB deployment.
type Topology struct {
state int64
@@ -321,13 +378,14 @@ func (t *Topology) Connect() error {
}

t.serversLock.Unlock()
uri, err := url.Parse(t.cfg.URI)
if err != nil {
return err
}
parsedHosts := strings.Split(uri.Host, ",")
if t.pollingRequired {
-		uri, err := url.Parse(t.cfg.URI)
-		if err != nil {
-			return err
-		}
// sanity check before passing the hostname to resolver
-		if parsedHosts := strings.Split(uri.Host, ","); len(parsedHosts) != 1 {
if len(parsedHosts) != 1 {
return fmt.Errorf("URI with SRV must include one and only one hostname")
}
_, _, err = net.SplitHostPort(uri.Host)
@@ -338,6 +396,11 @@ }
}
go t.pollSRVRecords(uri.Host)
t.pollingwg.Add(1)
} else {
logger := newHostLogger(t.cfg.logger)
for _, host := range parsedHosts {
logger.log(host)
Review comment from @prestonvasquez (Collaborator), Sep 6, 2023:
Before we log, we first need to check whether it is appropriate to log. We should update mustLogTopologyMessage to the following:

func mustLogTopologyMessage(topo *Topology, level logger.Level) bool {
	return topo.cfg.logger != nil && topo.cfg.logger.LevelComponentEnabled(
		level, logger.ComponentTopology)
}

and then wrap this block:

		if mustLogTopologyMessage(t, logger.LevelInfo) {
			hostSet := map[string]bool{}
			for _, host := range parsedHosts {
				if h, _, err := net.SplitHostPort(host); err == nil {
					host = h
				}

				if !hostSet[host] {
					logTopologyThirdPartyUsage(t, host)
				}

				hostSet[host] = true
			}
		}

}
}

t.subscriptionsClosed = false // explicitly set in case topology was disconnected and then reconnected
@@ -817,10 +880,12 @@ func (t *Topology) processSRVResults(parsedHosts []string) bool {
})
}
// Add all added hosts until the number of servers reaches srvMaxHosts.
logger := newHostLogger(t.cfg.logger)
for _, a := range diff.Added {
if t.cfg.SRVMaxHosts > 0 && len(t.servers) >= t.cfg.SRVMaxHosts {
break
}
logger.log(a)
addr := address.Address(a).Canonicalize()
_ = t.addServer(addr)
t.fsm.addServer(addr)
206 changes: 206 additions & 0 deletions x/mongo/driver/topology/topology_test.go
@@ -693,6 +693,212 @@ func TestTopologyConstruction(t *testing.T) {
})
}

type mockLogSink struct {
msgs []string
}

func (s *mockLogSink) Info(_ int, msg string, _ ...interface{}) {
s.msgs = append(s.msgs, msg)
}
func (*mockLogSink) Error(error, string, ...interface{}) {
// Do nothing.
}

// Note: SRV connection strings are intentionally untested, since initial
// lookup responses cannot be easily mocked.
func TestTopologyConstructionLogging(t *testing.T) {
Review comment (Collaborator):
Can this test be parallelized? From my testing it appears it can be. A possible restructuring is sketched below.
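A rough sketch of a parallelized subtest, assuming the sink and logger options move inside each subtest (as the next comment suggests) so parallel runs don't share state. This is a hypothetical restructuring, not part of this diff; all names come from the test file above.

	for _, tc := range testCases {
		tc := tc // capture the range variable so parallel subtests don't share it (pre-Go 1.22)
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			// Per-subtest sink and logger options: no shared mutable state,
			// so no deferred reset of sink.msgs is needed.
			sink := &mockLogSink{}
			loggerOptions := options.Logger().
				SetSink(sink).
				SetComponentLevel(options.LogComponentTopology, options.LogLevelInfo)

			cfg, err := NewConfig(options.Client().ApplyURI(tc.uri).SetLoggerOptions(loggerOptions), nil)
			require.Nil(t, err, "error constructing topology config: %v", err)

			topo, err := New(cfg)
			require.Nil(t, err, "topology.New error: %v", err)

			require.Nil(t, topo.Connect(), "Connect error")
			require.ElementsMatch(t, tc.msgs, sink.msgs, "expected messages to be %v, got %v", tc.msgs, sink.msgs)
		})
	}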

sink := &mockLogSink{}
loggerOptions := options.
Review comment (Collaborator):
Optional: if these are re-defined for each subtest, then we can parallelize the subtests.

Logger().
SetSink(sink).
SetComponentLevel(options.LogComponentTopology, options.LogLevelInfo)
t.Run("CosmosDB URIs", func(t *testing.T) {
testCases := []struct {
name string
uri string
msgs []string
}{
{
name: "normal",
uri: "mongodb://a.mongo.cosmos.azure.com:19555/",
msgs: []string{cosmosDBLog},
},
{
name: "multiple hosts",
uri: "mongodb://a.mongo.cosmos.azure.com:1955,b.mongo.cosmos.azure.com:19555/",
msgs: []string{cosmosDBLog},
},
{
name: "case-insensitive matching",
uri: "mongodb://a.MONGO.COSMOS.AZURE.COM:19555/",
msgs: []string{},
},
{
name: "Mixing genuine and nongenuine hosts (unlikely in practice)",
uri: "mongodb://a.example.com:27017,b.mongo.cosmos.azure.com:19555/",
msgs: []string{cosmosDBLog},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
defer func() {
sink.msgs = []string{}
}()
cfg, err := NewConfig(options.Client().ApplyURI(tc.uri).SetLoggerOptions(loggerOptions), nil)
require.Nil(t, err, "error constructing topology config: %v", err)

topo, err := New(cfg)
require.Nil(t, err, "topology.New error: %v", err)

err = topo.Connect()
require.Nil(t, err, "Connect error: %v", err)

require.ElementsMatch(t, tc.msgs, sink.msgs, "expected messages to be %v, got %v", tc.msgs, sink.msgs)
Review comment (Collaborator):
Should we use an assertion here? require stops the test at the first failure. This applies to the other subtests as well. A sketch of the assert-based alternative follows.
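For comparison, a sketch of the assert-based variant: testify's assert package records the failure and lets the remaining checks and subtests continue, instead of aborting via t.FailNow.

	// import "github.com/stretchr/testify/assert"
	// Same signature as require.ElementsMatch, but non-fatal on failure.
	assert.ElementsMatch(t, tc.msgs, sink.msgs, "expected messages to be %v, got %v", tc.msgs, sink.msgs)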

})
}
})
t.Run("DocumentDB URIs", func(t *testing.T) {
testCases := []struct {
name string
uri string
msgs []string
}{
{
name: "normal",
uri: "mongodb://a.docdb.amazonaws.com:27017/",
msgs: []string{documentDBLog},
},
{
name: "normal",
uri: "mongodb://a.docdb-elastic.amazonaws.com:27017/",
msgs: []string{documentDBLog},
},
{
name: "multiple hosts",
uri: "mongodb://a.docdb.amazonaws.com:27017,a.docdb-elastic.amazonaws.com:27017/",
msgs: []string{documentDBLog},
},
{
name: "case-insensitive matching",
uri: "mongodb://a.DOCDB.AMAZONAWS.COM:27017/",
msgs: []string{},
},
{
name: "case-insensitive matching",
uri: "mongodb://a.DOCDB-ELASTIC.AMAZONAWS.COM:27017/",
msgs: []string{},
},
{
name: "Mixing genuine and nongenuine hosts (unlikely in practice)",
uri: "mongodb://a.example.com:27017,b.docdb.amazonaws.com:27017/",
msgs: []string{documentDBLog},
},
{
name: "Mixing genuine and nongenuine hosts (unlikely in practice)",
uri: "mongodb://a.example.com:27017,b.docdb-elastic.amazonaws.com:27017/",
msgs: []string{documentDBLog},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
defer func() {
sink.msgs = []string{}
}()
cfg, err := NewConfig(options.Client().ApplyURI(tc.uri).SetLoggerOptions(loggerOptions), nil)
require.Nil(t, err, "error constructing topology config: %v", err)

topo, err := New(cfg)
require.Nil(t, err, "topology.New error: %v", err)

err = topo.Connect()
require.Nil(t, err, "Connect error: %v", err)

require.ElementsMatch(t, tc.msgs, sink.msgs, "expected messages to be %v, got %v", tc.msgs, sink.msgs)
})
}
})
t.Run("Mixing CosmosDB and DocumentDB URIs", func(t *testing.T) {
testCases := []struct {
name string
uri string
msgs []string
}{
{
name: "Mixing hosts",
uri: "mongodb://a.mongo.cosmos.azure.com:19555,a.docdb.amazonaws.com:27017/",
msgs: []string{cosmosDBLog, documentDBLog},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
defer func() {
sink.msgs = []string{}
}()
cfg, err := NewConfig(options.Client().ApplyURI(tc.uri).SetLoggerOptions(loggerOptions), nil)
require.Nil(t, err, "error constructing topology config: %v", err)

topo, err := New(cfg)
require.Nil(t, err, "topology.New error: %v", err)

err = topo.Connect()
require.Nil(t, err, "Connect error: %v", err)

require.ElementsMatch(t, tc.msgs, sink.msgs, "expected messages to be %v, got %v", tc.msgs, sink.msgs)
})
}
})
t.Run("genuine URIs", func(t *testing.T) {
testCases := []struct {
name string
uri string
msgs []string
}{
{
name: "normal",
uri: "mongodb://a.example.com:27017/",
msgs: []string{},
},
{
name: "multiple hosts",
uri: "mongodb://a.example.com:27017,b.example.com:27017/",
msgs: []string{},
},
{
name: "unexpected suffix",
uri: "mongodb://a.mongo.cosmos.azure.com.tld:19555/",
msgs: []string{},
},
{
name: "unexpected suffix",
uri: "mongodb://a.docdb.amazonaws.com.tld:27017/",
msgs: []string{},
},
{
name: "unexpected suffix",
uri: "mongodb://a.docdb-elastic.amazonaws.com.tld:27017/",
msgs: []string{},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
defer func() {
sink.msgs = []string{}
}()
cfg, err := NewConfig(options.Client().ApplyURI(tc.uri).SetLoggerOptions(loggerOptions), nil)
require.Nil(t, err, "error constructing topology config: %v", err)

topo, err := New(cfg)
require.Nil(t, err, "topology.New error: %v", err)

err = topo.Connect()
require.Nil(t, err, "Connect error: %v", err)

require.ElementsMatch(t, tc.msgs, sink.msgs, "expected messages to be %v, got %v", tc.msgs, sink.msgs)
})
}
})
}

type inWindowServer struct {
Address string `json:"address"`
Type string `json:"type"`