diff --git a/src/control/lib/control/auto.go b/src/control/lib/control/auto.go
index 2c46012683a..32ff348981f 100644
--- a/src/control/lib/control/auto.go
+++ b/src/control/lib/control/auto.go
@@ -891,7 +891,7 @@ func correctSSDCounts(log logging.Logger, sd *storageDetails) error {
 	if ssds.HasVMD() {
 		// If addresses are for VMD backing devices, convert to the logical VMD
 		// endpoint address as this is what is expected in the server config.
-		newAddrSet, err := ssds.BackingToVMDAddresses(log)
+		newAddrSet, err := ssds.BackingToVMDAddresses()
 		if err != nil {
 			return errors.Wrap(err, "converting backing addresses to vmd")
 		}
diff --git a/src/control/lib/hardware/pci.go b/src/control/lib/hardware/pci.go
index 55787e2c44a..cab95fe4a13 100644
--- a/src/control/lib/hardware/pci.go
+++ b/src/control/lib/hardware/pci.go
@@ -1,5 +1,5 @@
 //
-// (C) Copyright 2021-2022 Intel Corporation.
+// (C) Copyright 2021-2023 Intel Corporation.
 //
 // SPDX-License-Identifier: BSD-2-Clause-Patent
 //
@@ -16,8 +16,6 @@ import (
 
 	"github.com/dustin/go-humanize"
 	"github.com/pkg/errors"
-
-	"github.com/daos-stack/daos/src/control/logging"
 )
 
 const (
@@ -141,8 +139,15 @@ func (pa *PCIAddress) LessThan(other *PCIAddress) bool {
 		return false
 	}
 
-	return pa.VMDAddr.LessThan(other.VMDAddr) ||
-		pa.Domain < other.Domain ||
+	// If both are VMD backing device addresses, compare the VMD endpoint (domain) addresses
+	// first and return early if they differ; if equal, fall through to sort on the backing BDF.
+	if pa.VMDAddr != nil && other.VMDAddr != nil {
+		if !pa.VMDAddr.Equals(other.VMDAddr) {
+			return pa.VMDAddr.LessThan(other.VMDAddr)
+		}
+	}
+
+	return pa.Domain < other.Domain ||
 		pa.Domain == other.Domain && pa.Bus < other.Bus ||
 		pa.Domain == other.Domain && pa.Bus == other.Bus && pa.Device < other.Device ||
 		pa.Domain == other.Domain && pa.Bus == other.Bus && pa.Device == other.Device &&
@@ -374,7 +379,7 @@ func (pas *PCIAddressSet) HasVMD() bool {
 // e.g. [5d0505:01:00.0, 5d0505:03:00.0] -> [0000:5d:05.5].
 //
 // Many assumptions are made as to the input and output PCI address structure in the conversion.
-func (pas *PCIAddressSet) BackingToVMDAddresses(log logging.Logger) (*PCIAddressSet, error) {
+func (pas *PCIAddressSet) BackingToVMDAddresses() (*PCIAddressSet, error) {
 	if pas == nil {
 		return nil, errors.New("PCIAddressSet is nil")
 	}
@@ -394,7 +399,6 @@ func (pas *PCIAddressSet) BackingToVMDAddresses(log logging.Logger) (*PCIAddress
 			return nil, err
 		}
 
-		log.Debugf("replacing backing device %s with vmd %s", inAddr, vmdAddr)
 		if err := outAddrs.Add(vmdAddr); err != nil {
 			return nil, err
 		}
diff --git a/src/control/lib/hardware/pci_test.go b/src/control/lib/hardware/pci_test.go
index 6e5d474467a..98a85be28f0 100644
--- a/src/control/lib/hardware/pci_test.go
+++ b/src/control/lib/hardware/pci_test.go
@@ -1,5 +1,5 @@
 //
-// (C) Copyright 2020-2022 Intel Corporation.
+// (C) Copyright 2020-2023 Intel Corporation.
 //
 // SPDX-License-Identifier: BSD-2-Clause-Patent
 //
@@ -14,7 +14,6 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/daos-stack/daos/src/control/common/test"
-	"github.com/daos-stack/daos/src/control/logging"
 )
 
 func mockPCIBus(args ...uint8) *PCIBus {
@@ -179,6 +178,21 @@ func TestHardware_NewPCIAddressSet(t *testing.T) {
 				"0000:80:00.0", "0000:81:00.0", "5d0505:01:00.0",
 			},
 		},
+		"multiple vmd backing device addresses": {
+			addrStrs: []string{
+				"d70505:03:00.0",
+				"d70505:01:00.0",
+				"5d0505:03:00.0",
+				"5d0505:01:00.0",
+			},
+			expAddrStr: "5d0505:01:00.0 5d0505:03:00.0 d70505:01:00.0 d70505:03:00.0",
+			expAddrStrs: []string{
+				"5d0505:01:00.0",
+				"5d0505:03:00.0",
+				"d70505:01:00.0",
+				"d70505:03:00.0",
+			},
+		},
 		"vmd backing device": {
 			addrStrs: []string{
 				"050505:03:00.0", "050505:01:00.0",
@@ -210,6 +224,41 @@ func TestHardware_NewPCIAddressSet(t *testing.T) {
 	}
 }
 
+func TestHardware_PCIAddressSet_Addresses(t *testing.T) {
+	for name, tc := range map[string]struct {
+		addrStrs []string
+		expAddrs []*PCIAddress
+		expErr   error
+	}{
+		"multiple vmd backing device addresses": {
+			addrStrs: []string{
+				"d70505:03:00.0",
+				"d70505:01:00.0",
+				"5d0505:03:00.0",
+				"5d0505:01:00.0",
+			},
+			expAddrs: []*PCIAddress{
+				MustNewPCIAddress("5d0505:01:00.0"),
+				MustNewPCIAddress("5d0505:03:00.0"),
+				MustNewPCIAddress("d70505:01:00.0"),
+				MustNewPCIAddress("d70505:03:00.0"),
+			},
+		},
+	} {
+		t.Run(name, func(t *testing.T) {
+			addrSet, err := NewPCIAddressSet(tc.addrStrs...)
+			test.CmpErr(t, tc.expErr, err)
+			if tc.expErr != nil {
+				return
+			}
+
+			if diff := cmp.Diff(tc.expAddrs, addrSet.Addresses()); diff != "" {
+				t.Fatalf("unexpected list (-want, +got):\n%s\n", diff)
+			}
+		})
+	}
+}
+
 func TestHardware_PCIAddressSet_Intersect(t *testing.T) {
 	for name, tc := range map[string]struct {
 		addrStrs []string
@@ -305,9 +354,17 @@ func TestHardware_PCIAddressSet_BackingToVMDAddresses(t *testing.T) {
 			inAddrs:     []string{"5d0505:01:00.0"},
 			expOutAddrs: []string{"0000:5d:05.5"},
 		},
-		"multiple vmd address": {
-			inAddrs:     []string{"5d0505:01:00.0", "5d0505:03:00.0"},
-			expOutAddrs: []string{"0000:5d:05.5"},
+		"multiple vmd backing device addresses": {
+			inAddrs: []string{
+				"d70505:01:00.0",
+				"d70505:03:00.0",
+				"5d0505:01:00.0",
+				"5d0505:03:00.0",
+			},
+			expOutAddrs: []string{
+				"0000:5d:05.5",
+				"0000:d7:05.5",
+			},
 		},
 		"short vmd domain in address": {
 			inAddrs: []string{"5d055:01:00.0"},
@@ -315,16 +372,13 @@ func TestHardware_PCIAddressSet_BackingToVMDAddresses(t *testing.T) {
 		},
 	} {
 		t.Run(name, func(t *testing.T) {
-			log, buf := logging.NewTestLogger(t.Name())
-			defer test.ShowBufferOnFailure(t, buf)
-
 			addrSet, gotErr := NewPCIAddressSet(tc.inAddrs...)
 			test.CmpErr(t, tc.expErr, gotErr)
 			if tc.expErr != nil {
 				return
 			}
 
-			gotAddrs, gotErr := addrSet.BackingToVMDAddresses(log)
+			gotAddrs, gotErr := addrSet.BackingToVMDAddresses()
 			test.CmpErr(t, tc.expErr, gotErr)
 			if tc.expErr != nil {
 				return
diff --git a/src/control/lib/spdk/spdk.go b/src/control/lib/spdk/spdk.go
index 3bb779f550d..062ac719ad9 100644
--- a/src/control/lib/spdk/spdk.go
+++ b/src/control/lib/spdk/spdk.go
@@ -30,7 +30,7 @@ type EnvOptions struct {
 	EnableVMD    bool // flag if VMD functionality should be enabled
 }
 
-func (eo *EnvOptions) sanitizeAllowList(log logging.Logger) error {
+func (eo *EnvOptions) sanitizeAllowList() error {
 	if eo == nil {
 		return errors.New("nil EnvOptions")
 	}
@@ -39,7 +39,7 @@ func (eo *EnvOptions) sanitizeAllowList(log logging.Logger) error {
 	}
 
 	// DPDK will not accept VMD backing device addresses so convert to VMD addresses
-	newSet, err := eo.PCIAllowList.BackingToVMDAddresses(log)
+	newSet, err := eo.PCIAllowList.BackingToVMDAddresses()
 	if err != nil {
 		return err
 	}
diff --git a/src/control/lib/spdk/spdk_default.go b/src/control/lib/spdk/spdk_default.go
index 4c752951edf..77f382f0268 100644
--- a/src/control/lib/spdk/spdk_default.go
+++ b/src/control/lib/spdk/spdk_default.go
@@ -86,7 +86,7 @@ func (ei *EnvImpl) InitSPDKEnv(log logging.Logger, opts *EnvOptions) error {
 	// Only print error and more severe to stderr.
 	C.spdk_log_set_print_level(C.SPDK_LOG_ERROR)
 
-	if err := opts.sanitizeAllowList(log); err != nil {
+	if err := opts.sanitizeAllowList(); err != nil {
 		return errors.Wrap(err, "sanitizing PCI include list")
 	}
 
diff --git a/src/control/server/storage/bdev.go b/src/control/server/storage/bdev.go
index 615719d7ac0..1b36bbd5072 100644
--- a/src/control/server/storage/bdev.go
+++ b/src/control/server/storage/bdev.go
@@ -340,6 +340,9 @@ type NvmeController struct {
 
 // UpdateSmd adds or updates SMD device entry for an NVMe Controller.
 func (nc *NvmeController) UpdateSmd(newDev *SmdDevice) {
+	if nc == nil {
+		return
+	}
 	for _, exstDev := range nc.SmdDevices {
 		if newDev.UUID == exstDev.UUID {
 			*exstDev = *newDev
@@ -352,6 +355,9 @@ func (nc *NvmeController) UpdateSmd(newDev *SmdDevice) {
 
 // Capacity returns the cumulative total bytes of all namespace sizes.
 func (nc *NvmeController) Capacity() (tb uint64) {
+	if nc == nil {
+		return 0
+	}
 	for _, n := range nc.Namespaces {
 		tb += n.Size
 	}
@@ -442,6 +448,17 @@ func (ncs *NvmeControllers) Update(ctrlrs ...NvmeController) {
 	}
 }
 
+// Addresses returns a hardware.PCIAddressSet pointer to controller addresses.
+func (ncs NvmeControllers) Addresses() (*hardware.PCIAddressSet, error) {
+	pas := hardware.MustNewPCIAddressSet()
+	for _, c := range ncs {
+		if err := pas.AddStrings(c.PciAddr); err != nil {
+			return nil, err
+		}
+	}
+	return pas, nil
+}
+
 // NvmeAioDevice returns struct representing an emulated NVMe AIO device (file or kdev).
 type NvmeAioDevice struct {
 	Path string `json:"path"`
diff --git a/src/control/server/storage/bdev/backend_vmd.go b/src/control/server/storage/bdev/backend_vmd.go
index 5782d49e755..d651e917b36 100644
--- a/src/control/server/storage/bdev/backend_vmd.go
+++ b/src/control/server/storage/bdev/backend_vmd.go
@@ -1,5 +1,5 @@
 //
-// (C) Copyright 2021-2022 Intel Corporation.
+// (C) Copyright 2021-2023 Intel Corporation.
 //
 // SPDX-License-Identifier: BSD-2-Clause-Patent
 //
@@ -21,24 +21,17 @@ import (
 	"github.com/daos-stack/daos/src/control/server/storage"
 )
 
-// getVMD returns VMD endpoint address when provided string is a VMD backing device PCI address.
-// If the input string is not a VMD backing device PCI address, hardware.ErrNotVMDBackingAddress
-// is returned.
-func getVMD(inAddr string) (*hardware.PCIAddress, error) {
-	addr, err := hardware.NewPCIAddress(inAddr)
-	if err != nil {
-		return nil, errors.Wrap(err, "controller pci address invalid")
-	}
-
-	return addr.BackingToVMDAddress()
-}
-
 // mapVMDToBackingDevs stores found vmd backing device details under vmd address key.
 func mapVMDToBackingDevs(foundCtrlrs storage.NvmeControllers) (map[string]storage.NvmeControllers, error) {
 	vmds := make(map[string]storage.NvmeControllers)
 
 	for _, ctrlr := range foundCtrlrs {
-		vmdAddr, err := getVMD(ctrlr.PciAddr)
+		addr, err := hardware.NewPCIAddress(ctrlr.PciAddr)
+		if err != nil {
+			return nil, errors.Wrap(err, "controller pci address invalid")
+		}
+
+		vmdAddr, err := addr.BackingToVMDAddress()
 		if err != nil {
 			if err == hardware.ErrNotVMDBackingAddress {
 				continue
@@ -61,8 +54,13 @@ func mapVMDToBackingDevs(foundCtrlrs storage.NvmeControllers) (map[string]storag
 func mapVMDToBackingAddrs(foundCtrlrs storage.NvmeControllers) (map[string]*hardware.PCIAddressSet, error) {
 	vmds := make(map[string]*hardware.PCIAddressSet)
 
-	for _, ctrlr := range foundCtrlrs {
-		vmdAddr, err := getVMD(ctrlr.PciAddr)
+	ctrlrAddrs, err := foundCtrlrs.Addresses()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, addr := range ctrlrAddrs.Addresses() {
+		vmdAddr, err := addr.BackingToVMDAddress()
 		if err != nil {
 			if err == hardware.ErrNotVMDBackingAddress {
 				continue
@@ -75,7 +73,7 @@ func mapVMDToBackingAddrs(foundCtrlrs storage.NvmeControllers) (map[string]*hard
 		}
 
 		// add backing device address to vmd address key in map
-		if err := vmds[vmdAddr.String()].AddStrings(ctrlr.PciAddr); err != nil {
+		if err := vmds[vmdAddr.String()].Add(addr); err != nil {
 			return nil, err
 		}
 	}
@@ -218,14 +216,13 @@ func vmdFilterAddresses(log logging.Logger, inReq *storage.BdevPrepareRequest, v
 	}
 
 	// Convert any VMD backing device addresses to endpoint addresses as the input vmdPCIAddrs
-	// are what we are using for filters and these are VMD endpoint addresses.
-	// FIXME: This imposes a limitation in that individual backing devices cannot be allowed or
-	// blocked independently, see if this can be mitigated against.
-	inAllowList, err = inAllowList.BackingToVMDAddresses(log)
+	// are used for filtering and are themselves endpoint addresses. This imposes a limitation:
+	// individual backing devices cannot be allowed or blocked independently.
+	inAllowList, err = inAllowList.BackingToVMDAddresses()
 	if err != nil {
 		return
 	}
-	inBlockList, err = inBlockList.BackingToVMDAddresses(log)
+	inBlockList, err = inBlockList.BackingToVMDAddresses()
 	if err != nil {
 		return
 	}
diff --git a/src/control/server/storage/bdev_test.go b/src/control/server/storage/bdev_test.go
index 8b20edd5866..07033369fec 100644
--- a/src/control/server/storage/bdev_test.go
+++ b/src/control/server/storage/bdev_test.go
@@ -20,6 +20,15 @@ import (
 	"github.com/daos-stack/daos/src/control/common/test"
 )
 
+func ctrlrsFromPCIAddrs(addrs ...string) NvmeControllers {
+	ncs := make(NvmeControllers, len(addrs))
+	for i, addr := range addrs {
+		nc := NvmeController{PciAddr: addr}
+		ncs[i] = &nc
+	}
+	return ncs
+}
+
 func Test_NvmeDevState(t *testing.T) {
 	for name, tc := range map[string]struct {
 		state NvmeDevState
@@ -185,22 +194,53 @@ func Test_NvmeController_Update(t *testing.T) {
 	test.AssertEqual(t, len(mockCtrlrs), 7, "expected 7")
 }
 
-func Test_filterBdevScanResponse(t *testing.T) {
-	const (
-		vmdAddr1         = "0000:5d:05.5"
-		vmdBackingAddr1a = "5d0505:01:00.0"
-		vmdBackingAddr1b = "5d0505:03:00.0"
-		vmdAddr2         = "0000:7d:05.5"
-		vmdBackingAddr2a = "7d0505:01:00.0"
-		vmdBackingAddr2b = "7d0505:03:00.0"
-	)
-	ctrlrsFromPCIAddrs := func(addrs ...string) (ncs NvmeControllers) {
-		for _, addr := range addrs {
-			ncs = append(ncs, &NvmeController{PciAddr: addr})
-		}
-		return
+func Test_NvmeController_Addresses(t *testing.T) {
+	for name, tc := range map[string]struct {
+		ctrlrs   NvmeControllers
+		expAddrs []string
+		expErr   error
+	}{
+		"two vmd endpoints with two backing devices": {
+			ctrlrs: ctrlrsFromPCIAddrs(
+				"5d0505:03:00.0",
+				"7d0505:01:00.0",
+				"5d0505:01:00.0",
+				"7d0505:03:00.0",
+			),
+			expAddrs: []string{
+				"5d0505:01:00.0",
+				"5d0505:03:00.0",
+				"7d0505:01:00.0",
+				"7d0505:03:00.0",
+			},
+		},
+		"no addresses": {
+			expAddrs: []string{},
+		},
+		"invalid address": {
+			ctrlrs: ctrlrsFromPCIAddrs("a"),
+			expErr: errors.New("unable to parse"),
+		},
+	} {
+		t.Run(name, func(t *testing.T) {
+			gotAddrs, gotErr := tc.ctrlrs.Addresses()
+			test.CmpErr(t, tc.expErr, gotErr)
+			if gotErr != nil {
+				return
+			}
+
+			if diff := cmp.Diff(tc.expAddrs, gotAddrs.Strings()); diff != "" {
+				t.Fatalf("unexpected output address set (-want, +got):\n%s\n", diff)
+			}
+		})
 	}
+}
+
+func Test_filterBdevScanResponse(t *testing.T) {
 	for name, tc := range map[string]struct {
 		addrs    []string
 		scanResp *BdevScanResponse
 		expAddrs []string
 		expErr   error
 	}{
 		"two vmd endpoints; one filtered out": {
-			addrs: []string{vmdAddr2},
+			addrs: []string{"0000:7d:05.5"},
 			scanResp: &BdevScanResponse{
-				Controllers: ctrlrsFromPCIAddrs(vmdBackingAddr1a, vmdBackingAddr1b,
-					vmdBackingAddr2a, vmdBackingAddr2b),
+				Controllers: ctrlrsFromPCIAddrs(
+					"5d0505:03:00.0",
+					"7d0505:01:00.0",
+					"5d0505:01:00.0",
+					"7d0505:03:00.0",
+				),
+			},
+			expAddrs: []string{
+				"7d0505:01:00.0",
+				"7d0505:03:00.0",
 			},
-			expAddrs: []string{vmdBackingAddr2a, vmdBackingAddr2b},
 		},
 		"two ssds; one filtered out": {
 			addrs: []string{"0000:81:00.0"},
diff --git a/src/control/server/storage/provider.go b/src/control/server/storage/provider.go
index 05a02a63463..135d94bcee5 100644
--- a/src/control/server/storage/provider.go
+++ b/src/control/server/storage/provider.go
@@ -644,6 +644,7 @@ func scanBdevTiers(log logging.Logger, vmdEnabled, direct bool, cfg *Config, cac
 	}
 
 	var bsr BdevScanResponse
+	scanOrCache := "scanned"
 	if direct {
 		req := BdevScanRequest{
 			DeviceList: bdevs,
@@ -658,14 +659,16 @@ func scanBdevTiers(log logging.Logger, vmdEnabled, direct bool, cfg *Config, cac
 		if cache == nil {
 			cache = &BdevScanResponse{}
 		}
-		log.Debugf("using controllers from cache %q", cache.Controllers)
 		bsr = *cache
+		scanOrCache = "cached"
 	}
 
-	log.Debugf("bdevs in cfg: %s, scanned: %+v (direct=%v)", bdevs, bsr, direct)
+	log.Debugf("bdevs in cfg: %s, %s: %+v", bdevs, scanOrCache, bsr)
+
+	// Build slice of bdevs-per-tier from the entire scan response.
 	bdevCfgs := cfg.Tiers.BdevConfigs()
 	results := make([]BdevTierScanResult, 0, len(bdevCfgs))
-	resultBdevs := 0
+	resultBdevCount := 0
 	for _, bc := range bdevCfgs {
 		if bc.Bdev.DeviceList.Len() == 0 {
 			continue
@@ -674,15 +677,25 @@ func scanBdevTiers(log logging.Logger, vmdEnabled, direct bool, cfg *Config, cac
 		if err != nil {
 			return nil, errors.Wrapf(err, "filter scan cache for tier-%d", bc.Tier)
 		}
-		resultBdevs += len(fbsr.Controllers)
 		results = append(results, BdevTierScanResult{
 			Tier:   bc.Tier,
 			Result: fbsr,
 		})
+
+		// Keep tally of total number of controllers added to results.
+		cpas, err := fbsr.Controllers.Addresses()
+		if err != nil {
+			return nil, errors.Wrap(err, "get controller pci addresses")
+		}
+		cpas, err = cpas.BackingToVMDAddresses()
+		if err != nil {
+			return nil, errors.Wrap(err, "convert backing device to vmd domain addresses")
+		}
+		resultBdevCount += cpas.Len()
 	}
-	if resultBdevs != bdevs.Len() {
-		log.Errorf("Unexpected scan results, wanted %d controllers got %d", bdevs.Len(),
-			resultBdevs)
+	if resultBdevCount != bdevs.Len() {
+		log.Noticef("Unexpected scan results, wanted %d controllers got %d", bdevs.Len(),
+			resultBdevCount)
 	}
 
 	return results, nil
diff --git a/src/control/server/storage/provider_test.go b/src/control/server/storage/provider_test.go
index cacff8bf469..5ed2fb783d9 100644
--- a/src/control/server/storage/provider_test.go
+++ b/src/control/server/storage/provider_test.go
@@ -7,7 +7,9 @@
 package storage
 
 import (
+	"fmt"
 	"os"
+	"strings"
 	"testing"
 
 	"github.com/google/go-cmp/cmp"
@@ -41,6 +43,7 @@ func Test_scanBdevsTiers(t *testing.T) {
 		scanErr    error
 		expResults []BdevTierScanResult
 		expErr     error
+		expNotice  bool
 	}{
 		"nil cfg": {
 			expErr: errors.New("nil storage config"),
@@ -55,7 +58,7 @@ func Test_scanBdevsTiers(t *testing.T) {
 			},
 			expErr: errors.New("no bdevs in config"),
 		},
-		"nil scan cache": {
+		"use cache; nil scan cache": {
 			cfg: &Config{
 				Tiers: TierConfigs{
 					mockScmTier,
@@ -70,6 +73,7 @@ func Test_scanBdevsTiers(t *testing.T) {
 					},
 				},
 			},
+			expNotice: true,
 		},
 		"bypass cache; missing controller": {
 			direct: true,
@@ -93,6 +97,7 @@ func Test_scanBdevsTiers(t *testing.T) {
 					},
 				},
 			},
+			expNotice: true,
 		},
 		"bypass cache": {
 			direct: true,
@@ -155,6 +160,7 @@ func Test_scanBdevsTiers(t *testing.T) {
 					},
 				},
 			},
+			expNotice: true,
 		},
 		"use cache": {
 			cfg: &Config{
@@ -177,7 +183,7 @@ func Test_scanBdevsTiers(t *testing.T) {
 				},
 			},
 		},
-		"multi-tier; bypass cache": {
+		"bypass cache; multi-tier": {
 			direct: true,
 			cfg: &Config{
 				Tiers: TierConfigs{
@@ -208,7 +214,7 @@ func Test_scanBdevsTiers(t *testing.T) {
 				},
 			},
 		},
-		"multi-tier; use cache": {
+		"use cache; multi-tier": {
 			cfg: &Config{
 				Tiers: TierConfigs{
 					mockScmTier,
@@ -238,6 +244,65 @@ func Test_scanBdevsTiers(t *testing.T) {
 				},
 			},
 		},
+		"use cache; vmd domain missing in scan": {
+			cfg: &Config{
+				Tiers: TierConfigs{
+					mockScmTier,
+					NewTierConfig().WithStorageClass(ClassNvme.String()).
+						WithBdevDeviceList("0000:62:00.5", "0000:63:00.5"),
+				},
+			},
+			cache: &BdevScanResponse{
+				Controllers: NvmeControllers{
+					&NvmeController{PciAddr: "620005:83:00.0"},
+					&NvmeController{PciAddr: "620005:85:00.0"},
+					&NvmeController{PciAddr: "620005:87:00.0"},
+					&NvmeController{PciAddr: "620005:81:00.0"},
+				},
+			},
+			expResults: []BdevTierScanResult{
+				{
+					Result: &BdevScanResponse{
+						Controllers: NvmeControllers{
+							&NvmeController{PciAddr: "620005:83:00.0"},
+							&NvmeController{PciAddr: "620005:85:00.0"},
+							&NvmeController{PciAddr: "620005:87:00.0"},
+							&NvmeController{PciAddr: "620005:81:00.0"},
+						},
+					},
+				},
+			},
+			expNotice: true,
+		},
+		"use cache; multiple devices behind vmd domain": {
+			cfg: &Config{
+				Tiers: TierConfigs{
+					mockScmTier,
+					NewTierConfig().WithStorageClass(ClassNvme.String()).
+						WithBdevDeviceList("0000:62:00.5"),
+				},
+			},
+			cache: &BdevScanResponse{
+				Controllers: NvmeControllers{
+					&NvmeController{PciAddr: "620005:83:00.0"},
+					&NvmeController{PciAddr: "620005:85:00.0"},
+					&NvmeController{PciAddr: "620005:87:00.0"},
+					&NvmeController{PciAddr: "620005:81:00.0"},
+				},
+			},
+			expResults: []BdevTierScanResult{
+				{
+					Result: &BdevScanResponse{
+						Controllers: NvmeControllers{
+							&NvmeController{PciAddr: "620005:83:00.0"},
+							&NvmeController{PciAddr: "620005:85:00.0"},
+							&NvmeController{PciAddr: "620005:87:00.0"},
+							&NvmeController{PciAddr: "620005:81:00.0"},
+						},
+					},
+				},
+			},
+		},
 	} {
 		t.Run(name, func(t *testing.T) {
 			log, buf := logging.NewTestLogger(name)
@@ -256,6 +321,13 @@ func Test_scanBdevsTiers(t *testing.T) {
 			if diff := cmp.Diff(tc.expResults, gotResults, defBdevCmpOpts()...); diff != "" {
 				t.Fatalf("\nunexpected results (-want, +got):\n%s\n", diff)
 			}
+
+			txtMod := ""
+			if !tc.expNotice {
+				txtMod = "not "
+			}
+			msg := fmt.Sprintf("expected NOTICE level message to %shave been logged", txtMod)
+			test.AssertEqual(t, tc.expNotice, strings.Contains(buf.String(), "NOTICE"), msg)
 		})
 	}
 }
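Reviewer note: the new resultBdevCount tally and the filtering tests above rely on the convention that a VMD backing-device address encodes its VMD endpoint in the PCI domain field (e.g. 5d0505:01:00.0 maps to endpoint 0000:5d:05.5). The standalone sketch below only illustrates that convention; backingToVMD is a hypothetical helper written for this note and is not the DAOS implementation, which lives in hardware.PCIAddress.BackingToVMDAddress and PCIAddressSet.BackingToVMDAddresses.

// Illustration only: derive a VMD endpoint BDF from a backing-device address,
// assuming the six-hex-digit domain packs the endpoint's bus, device and function.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func backingToVMD(backing string) (string, error) {
	domain, _, ok := strings.Cut(backing, ":")
	if !ok || len(domain) != 6 {
		return "", fmt.Errorf("unexpected vmd domain in %q", backing)
	}
	bus, dev := domain[0:2], domain[2:4]
	fn, err := strconv.ParseUint(domain[4:6], 16, 8)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("0000:%s:%s.%x", bus, dev, fn), nil
}

func main() {
	for _, addr := range []string{"5d0505:01:00.0", "d70505:03:00.0"} {
		vmd, err := backingToVMD(addr)
		if err != nil {
			panic(err)
		}
		// Prints 5d0505:01:00.0 -> 0000:5d:05.5 and d70505:03:00.0 -> 0000:d7:05.5,
		// matching the expected values in the tests above.
		fmt.Printf("%s -> %s\n", addr, vmd)
	}
}

Because several backing devices collapse to one endpoint under this mapping, counting converted (deduplicated) addresses is what lets resultBdevCount be compared like-for-like against the per-tier device lists in the engine config.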