diff --git a/src/control/server/ctl_storage_rpc_test.go b/src/control/server/ctl_storage_rpc_test.go
index f2a402cfd5e..b2d25ad8b2e 100644
--- a/src/control/server/ctl_storage_rpc_test.go
+++ b/src/control/server/ctl_storage_rpc_test.go
@@ -1569,7 +1569,10 @@ func TestServer_CtlSvc_StorageScan_PostEngineStart(t *testing.T) {
 			var engineCfgs []*engine.Config
 			for i, sc := range tc.storageCfgs {
 				log.Debugf("storage cfg contains bdevs %v for engine %d", sc.Bdevs(), i)
-				engineCfgs = append(engineCfgs, engine.MockConfig().WithStorage(sc...))
+				engineCfgs = append(engineCfgs,
+					engine.MockConfig().
+						WithStorage(sc...).
+						WithTargetCount(tc.engineTargetCount[i]))
 			}
 			sCfg := config.DefaultServer().WithEngines(engineCfgs...)
 			cs := mockControlService(t, log, sCfg, csbmbc, tc.smbc, tc.smsc)
@@ -1625,7 +1628,6 @@ func TestServer_CtlSvc_StorageScan_PostEngineStart(t *testing.T) {
 				}
 				te.setDrpcClient(newMockDrpcClient(dcc))
 				te._superblock.Rank = ranklist.NewRankPtr(uint32(idx + 1))
-				te.setTargetCount(tc.engineTargetCount[idx])
 				for _, tc := range te.storage.GetBdevConfigs() {
 					tc.Bdev.DeviceRoles.OptionBits = storage.OptionBits(storage.BdevRoleAll)
 				}
diff --git a/src/control/server/instance.go b/src/control/server/instance.go
index 3837860fabd..14f53cf3b5b 100644
--- a/src/control/server/instance.go
+++ b/src/control/server/instance.go
@@ -338,14 +338,6 @@ func (ei *EngineInstance) setHugepageSz(hpSizeMb int) {
 	ei.runner.GetConfig().HugepageSz = hpSizeMb
 }
 
-// setTargetCount updates target count in engine config.
-func (ei *EngineInstance) setTargetCount(numTargets int) {
-	ei.Lock()
-	defer ei.Unlock()
-
-	ei.runner.GetConfig().TargetCount = numTargets
-}
-
 // GetTargetCount returns the target count set for this instance.
 func (ei *EngineInstance) GetTargetCount() int {
 	ei.RLock()
diff --git a/src/control/server/instance_exec.go b/src/control/server/instance_exec.go
index 3a31d036137..ab22cb4504f 100644
--- a/src/control/server/instance_exec.go
+++ b/src/control/server/instance_exec.go
@@ -90,12 +90,6 @@ func (ei *EngineInstance) finishStartup(ctx context.Context, ready *srvpb.Notify
 	if err := ei.handleReady(ctx, ready); err != nil {
 		return err
 	}
-	// update engine target count to reflect allocated number of targets, not number requested
-	// when starting
-	// NOTE: Engine mem_size passed on engine invocation is based on the number of targets
-	// requested in config so if number of targets allocated doesn't match the number of
-	// targets requested the mem_size value may be inappropriate.
-	ei.setTargetCount(int(ready.GetNtgts()))
 
 	ei.ready.SetTrue()
 
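The NOTE deleted above is the motivation for this patch: mem_size passed on engine invocation is sized from the target count *requested* in the config, while the old engine-side logic could silently start fewer targets than requested. A minimal sketch of that mismatch under the old derivation; the 8-core layout and the per-target sizing unit are illustrative assumptions, not DAOS's actual formula:

```c
#include <stdio.h>

int main(void)
{
	/* hypothetical engine: 8 cores, 2 system XS, 4 helper XS requested */
	unsigned ncores = 8, sys_xs = 2, helpers = 4;
	unsigned requested_tgts = 16;
	unsigned mem_per_tgt_mb = 1024;		/* made-up sizing unit */

	/* mem_size is provisioned up front from the requested count... */
	unsigned provisioned_mb = requested_tgts * mem_per_tgt_mb;

	/* ...but the old dss_tgt_nr_get() re-derived the count from the
	 * cores left after system and helper XS: 8 - 2 - 4 = 2 targets. */
	unsigned actual_tgts = ncores - sys_xs - helpers;

	printf("provisioned %u MB for %u targets, engine ran %u\n",
	       provisioned_mb, requested_tgts, actual_tgts);
	return 0;
}
```

After this patch the configured count is authoritative: the engine either starts with exactly that many targets or refuses to start, as the init.c change below enforces.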
diff --git a/src/engine/init.c b/src/engine/init.c
index a376488e62b..8d580188022 100644
--- a/src/engine/init.c
+++ b/src/engine/init.c
@@ -40,8 +40,7 @@ static char modules[MAX_MODULE_OPTIONS + 1];
 
 /**
- * Number of target threads the user would like to start
- * 0 means default value, see dss_tgt_nr_get();
+ * Number of target threads the user would like to start.
  */
 static unsigned int nr_threads;
 
@@ -250,56 +249,61 @@ modules_load(void)
 	return rc;
 }
 
+static unsigned int
+ncores_needed(unsigned int tgt_nr, unsigned int nr_helpers)
+{
+	return DAOS_TGT0_OFFSET + tgt_nr + nr_helpers;
+}
+
 /**
- * Get the appropriate number of main XS based on the number of cores and
- * passed in preferred number of threads.
+ * Check that the configured #targets and #nr_xs_helpers fit the available cores;
+ * #nr_xs_helpers may be reduced, #targets is never changed.
  */
 static int
-dss_tgt_nr_get(unsigned int ncores, unsigned int nr, bool oversubscribe)
+dss_tgt_nr_check(unsigned int ncores, unsigned int tgt_nr, bool oversubscribe)
 {
-	int tgt_nr;
-
 	D_ASSERT(ncores >= 1);
 
 	/* at most 2 helper XS per target */
-	if (dss_tgt_offload_xs_nr > 2 * nr)
-		dss_tgt_offload_xs_nr = 2 * nr;
-	else if (dss_tgt_offload_xs_nr == 0)
+	if (dss_tgt_offload_xs_nr > 2 * tgt_nr) {
+		D_PRINT("#nr_xs_helpers(%d) cannot exceed 2 times #targets (2 x %d = %d).\n",
+			dss_tgt_offload_xs_nr, tgt_nr, 2 * tgt_nr);
+		dss_tgt_offload_xs_nr = 2 * tgt_nr;
+	} else if (dss_tgt_offload_xs_nr == 0) {
 		D_WARN("Suggest to config at least 1 helper XS per DAOS engine\n");
+	}
 
-	/* Each system XS uses one core, and with dss_tgt_offload_xs_nr
-	 * offload XS. Calculate the tgt_nr as the number of main XS based
-	 * on number of cores.
-	 */
-retry:
-	tgt_nr = ncores - DAOS_TGT0_OFFSET - dss_tgt_offload_xs_nr;
-	if (tgt_nr <= 0)
-		tgt_nr = 1;
-
-	/* If user requires less target threads then set it as dss_tgt_nr,
-	 * if user oversubscribes, then:
-	 * . if oversubscribe is enabled, use the required number
-	 * . if oversubscribe is disabled(default),
-	 *   use the number calculated above
-	 * Note: oversubscribing may hurt performance.
-	 */
-	if (nr >= 1 && ((nr < tgt_nr) || oversubscribe)) {
-		tgt_nr = nr;
-		if (dss_tgt_offload_xs_nr > 2 * tgt_nr)
-			dss_tgt_offload_xs_nr = 2 * tgt_nr;
-	} else if (dss_tgt_offload_xs_nr > 2 * tgt_nr) {
-		dss_tgt_offload_xs_nr--;
-		goto retry;
+	if (oversubscribe) {
+		if (ncores_needed(tgt_nr, dss_tgt_offload_xs_nr) > ncores) {
+			if (ncores > DAOS_TGT0_OFFSET + tgt_nr)
+				dss_tgt_offload_xs_nr = ncores - DAOS_TGT0_OFFSET - tgt_nr;
+			else
+				dss_tgt_offload_xs_nr = 0;
+
+			D_PRINT("Force to start engine with %d targets on %d cores, #nr_xs_helpers "
+				"set as %d.\n",
+				tgt_nr, ncores, dss_tgt_offload_xs_nr);
+		}
+		goto out;
 	}
 
-	if (tgt_nr != nr)
-		D_PRINT("%d target XS(xstream) requested (#cores %d); "
-			"use (%d) target XS\n", nr, ncores, tgt_nr);
+	if (ncores_needed(tgt_nr, dss_tgt_offload_xs_nr) > ncores) {
+		if (ncores < DAOS_TGT0_OFFSET + tgt_nr) {
+			D_ERROR("cannot start engine with %d targets on %d cores, may try with "
+				"DAOS_TARGET_OVERSUBSCRIBE=1\n",
+				tgt_nr, ncores);
+			return -DER_INVAL;
+		}
+		dss_tgt_offload_xs_nr = ncores - DAOS_TGT0_OFFSET - tgt_nr;
+		D_PRINT("Start engine with %d targets on %d cores, #nr_xs_helpers set as %d.\n",
+			tgt_nr, ncores, dss_tgt_offload_xs_nr);
+	}
 
+out:
 	if (dss_tgt_offload_xs_nr % tgt_nr != 0)
 		dss_helper_pool = true;
 
-	return tgt_nr;
+	return 0;
 }
 
 static int
@@ -321,14 +325,12 @@ dss_topo_init()
 	depth = hwloc_get_type_depth(dss_topo, HWLOC_OBJ_NUMANODE);
 	numa_node_nr = hwloc_get_nbobjs_by_depth(dss_topo, depth);
 	d_getenv_bool("DAOS_TARGET_OVERSUBSCRIBE", &tgt_oversub);
+	dss_tgt_nr = nr_threads;
 
 	/* if no NUMA node was specified, or NUMA data unavailable */
 	/* fall back to the legacy core allocation algorithm */
 	if (dss_numa_node == -1 || numa_node_nr <= 0) {
 		D_PRINT("Using legacy core allocation algorithm\n");
-		dss_tgt_nr = dss_tgt_nr_get(dss_core_nr, nr_threads,
-					    tgt_oversub);
-
 		if (dss_core_offset >= dss_core_nr) {
 			D_ERROR("invalid dss_core_offset %u "
 				"(set by \"-f\" option),"
@@ -336,7 +338,8 @@ dss_topo_init()
 				dss_core_offset, dss_core_nr - 1);
 			return -DER_INVAL;
 		}
-		return 0;
+
+		return dss_tgt_nr_check(dss_core_nr, dss_tgt_nr, tgt_oversub);
 	}
 
 	if (dss_numa_node > numa_node_nr) {
@@ -381,17 +384,15 @@ dss_topo_init()
 	hwloc_bitmap_asprintf(&cpuset, core_allocation_bitmap);
 	free(cpuset);
 
-	dss_tgt_nr = dss_tgt_nr_get(dss_num_cores_numa_node, nr_threads,
-				    tgt_oversub);
 	if (dss_core_offset >= dss_num_cores_numa_node) {
 		D_ERROR("invalid dss_core_offset %d (set by \"-f\" option), "
			"should within range [0, %d]", dss_core_offset,
 			dss_num_cores_numa_node - 1);
 		return -DER_INVAL;
 	}
-
 	D_PRINT("Using NUMA core allocation algorithm\n");
-	return 0;
+
+	return dss_tgt_nr_check(dss_num_cores_numa_node, dss_tgt_nr, tgt_oversub);
 }
 
 static ABT_mutex server_init_state_mutex;
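To make the new control flow easier to trace, here is a self-contained model of the arithmetic dss_tgt_nr_check() enforces. This is a sketch under stated assumptions, not the DAOS source: DAOS_TGT0_OFFSET is taken as 2 (the system XS), and the globals plus D_PRINT/D_ERROR plumbing are folded into parameters and return codes:

```c
#include <stdio.h>

#define TGT0_OFFSET 2	/* assumed number of system XS, illustration only */

/* Model of dss_tgt_nr_check(): returns 0 if the engine may start, -1 if it
 * must refuse; *helpers may be reduced, tgt_nr is never changed. */
static int
tgt_nr_check(unsigned ncores, unsigned tgt_nr, unsigned *helpers, int oversub)
{
	if (*helpers > 2 * tgt_nr)	/* at most 2 helper XS per target */
		*helpers = 2 * tgt_nr;

	if (TGT0_OFFSET + tgt_nr + *helpers <= ncores)
		return 0;		/* every XS gets its own core */

	if (!oversub && ncores < TGT0_OFFSET + tgt_nr)
		return -1;		/* targets alone don't fit: hard error */

	/* trim helpers to whatever cores remain (possibly zero) */
	*helpers = ncores > TGT0_OFFSET + tgt_nr ? ncores - TGT0_OFFSET - tgt_nr : 0;
	return 0;
}

int main(void)
{
	unsigned helpers = 4;

	/* 8 cores, 4 targets, 4 helpers: 2 + 4 + 4 = 10 > 8, so helpers
	 * are cut to 8 - 2 - 4 = 2 and startup proceeds. */
	printf("rc=%d helpers=%u\n", tgt_nr_check(8, 4, &helpers, 0), helpers);

	/* 4 cores, 4 targets: 2 + 4 = 6 > 4, refused by default... */
	helpers = 0;
	printf("rc=%d\n", tgt_nr_check(4, 4, &helpers, 0));

	/* ...but allowed with DAOS_TARGET_OVERSUBSCRIBE=1 (helpers drop to 0). */
	helpers = 2;
	printf("rc=%d helpers=%u\n", tgt_nr_check(4, 4, &helpers, 1), helpers);
	return 0;
}
```

The behavioral change is that the old dss_tgt_nr_get() would quietly lower the target count to fit the cores; the new check only ever trims helpers and otherwise fails the start.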
diff --git a/src/tests/ftest/control/dmg_server_set_logmasks.yaml b/src/tests/ftest/control/dmg_server_set_logmasks.yaml
index b4e3a1ddfeb..68e58cc7488 100644
--- a/src/tests/ftest/control/dmg_server_set_logmasks.yaml
+++ b/src/tests/ftest/control/dmg_server_set_logmasks.yaml
@@ -6,6 +6,7 @@ server_config:
   engines_per_host: 1
   engines:
     0:
+      targets: 4
       storage:
         0:
           class: ram
diff --git a/src/tests/ftest/harness/core_files.yaml b/src/tests/ftest/harness/core_files.yaml
index 8133398a42f..04cb67f7a11 100644
--- a/src/tests/ftest/harness/core_files.yaml
+++ b/src/tests/ftest/harness/core_files.yaml
@@ -5,6 +5,7 @@ server_config:
   engines_per_host: 1
   engines:
     0:
+      targets: 4
       storage:
         0:
           class: ram
diff --git a/src/tests/ftest/pool/create_all_vm.yaml b/src/tests/ftest/pool/create_all_vm.yaml
index 0e030ee8cbc..2c053e5b038 100644
--- a/src/tests/ftest/pool/create_all_vm.yaml
+++ b/src/tests/ftest/pool/create_all_vm.yaml
@@ -32,7 +32,7 @@ server_config:
   engines_per_host: 1
   engines:
     0:
-      targets: 5
+      targets: 4
       nr_xs_helpers: 0
       storage:
         0:
diff --git a/utils/nlt_server.yaml b/utils/nlt_server.yaml
index 4b9a1a9ffd8..5d0d2d9b3ed 100644
--- a/utils/nlt_server.yaml
+++ b/utils/nlt_server.yaml
@@ -14,6 +14,7 @@ engines:
     - DAOS_MD_CAP=1024
     - DAOS_STRICT_SHUTDOWN=1
     - CRT_CTX_SHARE_ADDR=0
+    - DAOS_TARGET_OVERSUBSCRIBE=1
     - ABT_STACK_OVERFLOW_CHECK=mprotect
   storage:
   -
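The test-config updates follow from the stricter check: a yaml that requests more execution streams than the machine has cores now fails engine start instead of being silently trimmed. The ftest configs therefore pin targets: 4 explicitly, create_all_vm.yaml drops from 5 to 4 targets (presumably so the requested XS count fits the small CI VMs), and NLT opts into oversubscription since its builders may have very few cores. A hypothetical engine section sized under the new rule, assuming 2 system XS on an 8-core node (field names follow the ftest configs above):

```yaml
server_config:
  engines_per_host: 1
  engines:
    0:
      # 2 system XS + 4 targets + 2 helpers = 8 XS, one per core.
      targets: 4
      nr_xs_helpers: 2
      env_vars:
        # Escape hatch for core-starved machines: start anyway and
        # let the engine shrink helpers (never targets) to fit.
        - DAOS_TARGET_OVERSUBSCRIBE=1
```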