diff --git a/test/functional/tests/cache_ops/test_core_add.py b/test/functional/tests/cache_ops/test_core_add.py new file mode 100644 index 000000000..be4ca057c --- /dev/null +++ b/test/functional/tests/cache_ops/test_core_add.py @@ -0,0 +1,154 @@ +# +# Copyright(c) 2022 Intel Corporation +# Copyright(c) 2024 Huawei Technologies Co., Ltd. +# SPDX-License-Identifier: BSD-3-Clause +# + +import pytest + +from itertools import cycle +from random import shuffle +from api.cas import casadm +from api.cas.casadm_parser import get_cores, get_detached_cores, get_inactive_cores +from core.test_run import TestRun +from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan +from test_tools.fs_utils import remove, readlink +from test_utils.filesystem.symlink import Symlink +from test_utils.output import CmdException +from test_utils.size import Unit, Size + +cores_number = 4 + + +@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) +@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) +def test_add_core_path_by_id(): + """ + title: Test for adding core with by-id path. + description: | + Check if OpenCAS accepts by-id path to disks added as cores. + pass_criteria: + - Cores are added to cache + - Cores are added to cache with the same path as given + """ + with TestRun.step("Prepare partitions for cache and for cores."): + cache_dev = TestRun.disks["cache"] + cache_dev.create_partitions([Size(200, Unit.MebiByte)]) + cache_part = cache_dev.partitions[0] + core_dev = TestRun.disks["core"] + core_dev.create_partitions([Size(400, Unit.MebiByte)] * cores_number) + + with TestRun.step("Start cache and add cores"): + cache = casadm.start_cache(cache_part, force=True) + for core_dev_part in core_dev.partitions: + cache.add_core(core_dev_part) + + with TestRun.step("Check if all cores are added with proper paths."): + added_cores = get_cores(cache.cache_id) + added_cores_number = len(added_cores) + if added_cores_number != cores_number: + TestRun.fail(f"Expected {cores_number} cores, got {added_cores_number}!") + + for core, partition in zip(added_cores, core_dev.partitions): + if partition.path != core.core_device.path: + TestRun.LOGGER.error( + f"Paths are different and can cause problems!\n" + f"Path passed as an argument to add core: {partition.path}\n" + f"Path displayed by 'casadm -L': {core.core_device.path}" + ) + + +@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) +@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) +def test_add_core_path_not_by_id(): + """ + title: Negative test for adding core with non-by-id path. + description: | + Check if OpenCAS does not accept any other than by-id path to disks added as cores. 
+ pass_criteria: + - Cores are not added to cache + """ + + symlink_path = '/tmp/castle' + + with TestRun.step("Prepare partitions for cache and for cores."): + cache_dev = TestRun.disks["cache"] + cache_dev.create_partitions([Size(200, Unit.MebiByte)]) + cache_part = cache_dev.partitions[0] + core_dev = TestRun.disks["core"] + core_dev.create_partitions([Size(400, Unit.MebiByte)] * cores_number) + + with TestRun.step("Start cache."): + cache = casadm.start_cache(cache_part, force=True) + + with TestRun.step( + f"Create symlinks for {core_dev.path} partitions in {symlink_path} directory."): + core_dev_links = [] + for partition in core_dev.partitions: + path = readlink(partition.path) + core_dev_links.append( + Symlink.create_symlink(f"{symlink_path}_{path.split('/')[-1]}", path) + ) + + with TestRun.step(f"Find various symlinks to {core_dev.path}."): + links = [] + for partition in core_dev.partitions: + links.append(Symlink(get_by_partuuid_link(partition.path))) + links.append(Symlink(readlink(partition.path))) + core_dev_links.extend([ + link for link in links if + readlink(partition.path) in link.get_target() + ]) + + with TestRun.step(f"Select different links to {core_dev.path} partitions."): + selected_links = select_random_links(core_dev_links) + + with TestRun.step(f"Try to add {cores_number} cores with non-by-id path."): + for dev, symlink in zip(core_dev.partitions, selected_links): + dev.path = symlink.full_path + try: + cache.add_core(dev) + TestRun.fail(f"Core {core_dev.path} is added!") + except CmdException: + pass + TestRun.LOGGER.info("Cannot add cores as expected.") + + with TestRun.step("Check if cores are not added."): + get_core_methods = [get_cores, get_inactive_cores, get_detached_cores] + core_types = ["active", "inactive", "detached"] + for method, core_type in zip(get_core_methods, core_types): + added_cores_number = len(method(cache.cache_id)) + if added_cores_number > 0: + TestRun.LOGGER.error( + f"Expected 0 cores, got {added_cores_number} {core_type} cores!" + ) + + with TestRun.step("Cleanup test symlinks."): + remove(f"{symlink_path}_*", True, True) + + +def get_by_partuuid_link(path): + output = TestRun.executor.run(f"blkid {path}") + if "PARTUUID" not in output.stdout: + return path + + uuid = output.stdout.split()[-1] + start = uuid.index('"') + end = uuid.index('"', start + 1) + uuid = uuid[start + 1:end] + + return f"/dev/disk/by-partuuid/{uuid}" + + +def select_random_links(links): + shuffle(links) + selected_links = [] + links_cycle = cycle(links) + + while len(selected_links) < cores_number: + link = next(links_cycle) + target = link.get_target() + if target not in [sel_link.get_target() for sel_link in selected_links]: + selected_links.append(link) + + return selected_links diff --git a/test/functional/tests/initialize/test_startup_init_config.py b/test/functional/tests/initialize/test_startup_init_config.py index 34c18015b..be7865e4d 100644 --- a/test/functional/tests/initialize/test_startup_init_config.py +++ b/test/functional/tests/initialize/test_startup_init_config.py @@ -1,30 +1,34 @@ # # Copyright(c) 2019-2022 Intel Corporation -# Copyright(c) 2024 Huawei Technologies +# Copyright(c) 2024 Huawei Technologies Co., Ltd. 
# SPDX-License-Identifier: BSD-3-Clause # import pytest -from datetime import timedelta -from api.cas import casadm, casctl, casadm_parser -from api.cas.casadm_parser import get_caches, get_cores, get_cas_devices_dict +from datetime import timedelta +from time import sleep +from api.cas import casctl, casadm, casadm_parser from api.cas.cache_config import CacheMode +from api.cas.cas_service import set_cas_service_timeout, clear_cas_service_timeout +from api.cas.cli_messages import check_stdout_msg, no_caches_running +from api.cas.core import CoreStatus from api.cas.init_config import InitConfig from core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan -from test_utils.filesystem.file import File from test_tools.disk_utils import Filesystem -from test_utils import fstab +from test_tools.fs_utils import readlink from test_tools.dd import Dd -from test_utils.size import Unit, Size -from test_utils.os_utils import sync, Udev +from test_utils import fstab from test_utils.emergency_escape import EmergencyEscape -from api.cas.cas_service import set_cas_service_timeout, clear_cas_service_timeout +from test_utils.filesystem.file import File +from test_utils.os_utils import sync, Udev +from test_utils.size import Unit, Size mountpoint = "/mnt" filepath = f"{mountpoint}/file" +cores_number = 4 @pytest.mark.os_dependent @@ -36,6 +40,8 @@ def test_cas_startup(cache_mode, filesystem): """ title: Test for starting CAS on system startup. + description: | + Check if OpenCAS loads correctly after system reboot. pass_criteria: - System does not crash. - CAS modules are loaded before partitions are mounted. @@ -43,10 +49,10 @@ def test_cas_startup(cache_mode, filesystem): - Exported object is mounted after startup is complete. """ with TestRun.step("Prepare partitions for cache (200MiB) and for core (400MiB)"): - cache_dev = TestRun.disks['cache'] + cache_dev = TestRun.disks["cache"] cache_dev.create_partitions([Size(200, Unit.MebiByte)]) cache_part = cache_dev.partitions[0] - core_dev = TestRun.disks['core'] + core_dev = TestRun.disks["core"] core_dev.create_partitions([Size(400, Unit.MebiByte)]) core_part = core_dev.partitions[0] @@ -80,14 +86,14 @@ def test_cas_startup(cache_mode, filesystem): TestRun.executor.reboot() with TestRun.step("Check if cache is started"): - caches = list(get_caches()) + caches = casadm_parser.get_caches() if len(caches) != 1: TestRun.fail(f"Expected one cache, got {len(caches)}!") if caches[0].cache_id != cache.cache_id: TestRun.fail("Invalid cache id!") with TestRun.step("Check if core is added"): - cores = list(get_cores(cache.cache_id)) + cores = casadm_parser.get_cores(cache.cache_id) if len(cores) != 1: TestRun.fail(f"Expected one core, got {len(cores)}!") if cores[0].core_id != core.core_id: @@ -120,19 +126,19 @@ def test_cas_init_with_changed_mode(cache_mode_pair): """ title: Check starting cache in other cache mode by initializing OpenCAS service from config. description: | - Start cache, create config based on running configuration but with another cache mode, - reinitialize OpenCAS service with '--force' option and check if cache defined - in config file starts properly. - Check all cache modes. + Start cache, create config based on running configuration but with another cache mode, + reinitialize OpenCAS service with '--force' option and check if cache defined + in config file starts properly. + Check all cache modes. pass_criteria: - Cache starts with attached core - Cache starts in mode saved in configuration file. 
""" with TestRun.step("Prepare partitions for cache and core."): - cache_dev = TestRun.disks['cache'] + cache_dev = TestRun.disks["cache"] cache_dev.create_partitions([Size(200, Unit.MebiByte)]) cache_part = cache_dev.partitions[0] - core_dev = TestRun.disks['core'] + core_dev = TestRun.disks["core"] core_dev.create_partitions([Size(400, Unit.MebiByte)]) core_part = core_dev.partitions[0] @@ -164,7 +170,7 @@ def test_cas_startup_lazy(): """ title: Test successful boot with CAS configuration including lazy_startup description: | - Check that DUT boots succesfully with failing lazy-startup marked devices + Check that DUT boots succesfully with failing lazy-startup marked devices pass_criteria: - DUT boots sucesfully - caches are configured as expected @@ -215,9 +221,9 @@ def test_cas_startup_lazy(): power_control.power_cycle() with TestRun.step("Verify if all the devices are initialized properly"): - core_pool_list = get_cas_devices_dict()["core_pool"].values() - caches_list = get_cas_devices_dict()["caches"].values() - cores_list = get_cas_devices_dict()["cores"].values() + core_pool_list = casadm_parser.get_cas_devices_dict()["core_pool"] + caches_list = casadm_parser.get_cas_devices_dict()["caches"].values() + cores_list = casadm_parser.get_cas_devices_dict()["cores"].values() core_pool_paths = {c["device_path"] for c in core_pool_list} if core_pool_paths != expected_core_pool_paths: @@ -267,7 +273,7 @@ def test_cas_startup_negative_missing_core(): """ title: Test unsuccessful boot with CAS configuration description: | - Check that DUT doesn't boot sucesfully when using invalid CAS configuration + Check that DUT doesn't boot sucesfully when using invalid CAS configuration pass_criteria: - DUT enters emergency mode """ @@ -329,7 +335,7 @@ def test_cas_startup_negative_missing_cache(): """ title: Test unsuccessful boot with CAS configuration description: | - Check that DUT doesn't boot sucesfully when using invalid CAS configuration + Check that DUT doesn't boot sucesfully when using invalid CAS configuration pass_criteria: - DUT enters emergency mode """ @@ -393,8 +399,8 @@ def test_failover_config_startup(): """ title: Test successful boot with failover-specific configuration options description: | - Check that DUT boots sucesfully and CAS is properly configured when using failover-specific - configuration options (target_failover_state) + Check that DUT boots sucesfully and CAS is properly configured when using failover-specific + configuration options (target_failover_state) pass_criteria: - DUT boots sucesfully - caches are configured as expected @@ -441,29 +447,29 @@ def test_failover_config_startup(): power_control.power_cycle() with TestRun.step("Verify if all the devices are initialized properly"): - core_pool_list = get_cas_devices_dict()["core_pool"] - caches_list = get_cas_devices_dict()["caches"].values() - cores_list = get_cas_devices_dict()["cores"].values() + core_pool_list = casadm_parser.get_cas_devices_dict()["core_pool"] + caches_list = casadm_parser.get_cas_devices_dict()["caches"].values() + cores_list = casadm_parser.get_cas_devices_dict()["cores"].values() if len(core_pool_list) != 0: TestRun.error(f"No cores expected in core pool. 
Got {core_pool_list}") else: TestRun.LOGGER.info("Core pool is ok") - expected_caches_paths = set([active_cache_path, standby_cache_path]) + expected_caches_paths = {active_cache_path, standby_cache_path} caches_paths = {c["device"] for c in caches_list} if caches_paths != expected_caches_paths: - TestRun.error( + TestRun.LOGGER.error( f"Expected the following devices as caches " f"{expected_caches_paths}. Got {caches_paths}" ) else: TestRun.LOGGER.info("Caches are ok") - expected_core_paths = set([active_core_path]) + expected_core_paths = {active_core_path} cores_paths = {c["device"] for c in cores_list} if cores_paths != expected_core_paths: - TestRun.error( + TestRun.LOGGER.error( f"Expected the following devices as cores " f"{expected_core_paths}. Got {cores_paths}" ) @@ -497,9 +503,9 @@ def test_failover_config_startup_negative(): """ title: Test unsuccessful boot with failover-specific configuration options description: | - Check that DUT doesn't boot successfully with misconfigured cache using failover-specific - configuration options (target_failover_state). After boot it should be verified that emergency - mode was in fact triggered. + Check that DUT doesn't boot successfully with misconfigured cache using failover-specific + configuration options (target_failover_state). After boot it should be verified that + emergency mode was in fact triggered. pass_criteria: - DUT enters emergency mode """ @@ -569,3 +575,125 @@ def validate_cache(cache_mode): f"Cache started in wrong mode!\n" f"Should start in {cache_mode}, but started in {current_mode} mode." ) + + +@pytest.mark.os_dependent +@pytest.mark.remote_only +@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) +@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) +@pytest.mark.parametrizex("cache_mode", CacheMode) +@pytest.mark.parametrizex("reboot_type", ["soft", "hard"]) +@pytest.mark.require_plugin("power_control") +def test_cas_lazy_startup_core_path_by_id(cache_mode, reboot_type): + """ + title: Test for CAS startup when cores are set in config with wrong by-id path. + description: | + Start cache, add to config different links to devices that make up the cache + and check if cache start fails after reboot. Clear cache metadata before reboot. 
+ pass_criteria: + - System does not crash + - Cache is running after startup + - Cores are detached after startup + """ + + with TestRun.step("Prepare partitions for cache and for cores"): + cache_dev = TestRun.disks["cache"] + cache_dev.create_partitions([Size(200, Unit.MebiByte)]) + cache_part = cache_dev.partitions[0] + core_dev = TestRun.disks["core"] + core_dev.create_partitions([Size(400, Unit.MebiByte)] * cores_number) + + with TestRun.step("Start cache and add cores"): + cache = casadm.start_cache(cache_part, cache_mode, force=True) + for partition in core_dev.partitions: + cache.add_core(partition) + + with TestRun.step("Create opencas.conf"): + InitConfig.create_init_config_from_running_configuration( + cache_extra_flags="lazy_startup=true", + core_extra_flags="lazy_startup=true" + ) + + with TestRun.step("Stop cache and clear metadata before reboot"): + cache.stop() + casadm.zero_metadata(cache_part) + + with TestRun.step("Reset platform"): + if reboot_type == "soft": + TestRun.executor.reboot() + else: # wait few seconds to simulate power failure during normal system run + sleep(5) # not when configuring Open CAS + power_control = TestRun.plugin_manager.get_plugin("power_control") + power_control.power_cycle() + + with TestRun.step("Check if all cores are detached"): + listed_cores = casadm_parser.get_cas_devices_dict().get("core_pool") + listed_cores_number = len(listed_cores) + if listed_cores_number != cores_number: + TestRun.fail(f"Expected {cores_number} cores, got {listed_cores_number}!") + + for core in listed_cores.values(): + if core.get("status").lower() != CoreStatus.detached.name: + TestRun.fail(f"Core {core.get('device')} isn't detached as expected.") + + +@pytest.mark.os_dependent +@pytest.mark.remote_only +@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) +@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) +@pytest.mark.parametrizex("cache_mode", CacheMode) +@pytest.mark.parametrizex("reboot_type", ["soft", "hard"]) +@pytest.mark.require_plugin("power_control") +def test_cas_lazy_startup_core_path_not_by_id(cache_mode, reboot_type): + """ + title: Negative test for CAS startup when cores are set in config with short path. + description: | + Start cache, add to config short path (/dev/sdX) to devices that make up the + cache and check if cache start fails after reboot. Clear cache metadata before reboot. 
+ pass_criteria: + - System does not crash + - Cache is not running after startup + - No cores after startup + """ + + with TestRun.step("Prepare partitions for cache and for cores."): + cache_dev = TestRun.disks["cache"] + cache_dev.create_partitions([Size(200, Unit.MebiByte)]) + cache_part = cache_dev.partitions[0] + core_dev = TestRun.disks["core"] + core_dev.create_partitions([Size(400, Unit.MebiByte)] * cores_number) + + with TestRun.step("Start cache and add cores."): + cache = casadm.start_cache(cache_part, cache_mode, force=True) + cores = [cache.add_core(partition) for partition in core_dev.partitions] + + with TestRun.step("Create opencas.conf."): + create_init_config(cache, cores, [readlink(part.path) for part in core_dev.partitions]) + + with TestRun.step("Stop cache and clear metadata before reboot."): + cache.stop() + casadm.zero_metadata(cache_part) + + with TestRun.step("Reset platform."): + if reboot_type == "soft": + TestRun.executor.reboot() + else: # wait few seconds to simulate power failure during normal system run + sleep(5) # not when configuring Open CAS + power_control = TestRun.plugin_manager.get_plugin('power_control') + power_control.power_cycle() + + with TestRun.step("Check if cache is not running."): + check_stdout_msg(casadm.list_caches(), no_caches_running) + + +def create_init_config(cache, cores, paths): + init_conf = InitConfig() + + init_conf.add_cache( + cache.cache_id, cache.cache_device, cache.get_cache_mode(), "lazy_startup=true" + ) + for core, path in zip(cores, paths): + params = [str(cache.cache_id), str(core.core_id), path, "lazy_startup=true"] + init_conf.core_config_lines.append('\t'.join(params)) + init_conf.save_config_file() + return init_conf
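
Note on the PARTUUID lookup added in test_core_add.py: get_by_partuuid_link() takes the last whitespace-separated token of the default blkid output and slices the value out from between its quotes, which relies on PARTUUID always being printed as the final field. A minimal alternative sketch that asks blkid for the value directly via its standard -s/-o options; it reuses TestRun.executor.run() from the test framework and is only an illustration, not part of the patch:

    from core.test_run import TestRun


    def get_by_partuuid_link(path):
        # Print only the PARTUUID value; stdout is empty when the device has no
        # PARTUUID (the helper then falls back to the raw path, as in the patch).
        output = TestRun.executor.run(f"blkid -s PARTUUID -o value {path}")
        uuid = output.stdout.strip()
        return f"/dev/disk/by-partuuid/{uuid}" if uuid else path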
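
For reference, create_init_config() in test_startup_init_config.py builds each core entry by tab-joining the cache id, core id, short device path and the lazy_startup flag before appending it to InitConfig.core_config_lines. Assuming cache id 1 and illustrative /dev/sdb partitions (the actual device names depend on the DUT), the generated core entries in opencas.conf would look roughly like (columns tab-separated):

    1   1   /dev/sdb1   lazy_startup=true
    1   2   /dev/sdb2   lazy_startup=true
    1   3   /dev/sdb3   lazy_startup=true
    1   4   /dev/sdb4   lazy_startup=true

Because these are plain /dev/sdX-style paths rather than persistent by-id links, the cache configured this way is expected not to come up after the reboot, which test_cas_lazy_startup_core_path_not_by_id asserts via check_stdout_msg(casadm.list_caches(), no_caches_running).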