From 70c892e1e30a1f85134ac799075fbf446c153877 Mon Sep 17 00:00:00 2001 From: Kamil Gierszewski Date: Thu, 29 Aug 2024 12:04:26 +0200 Subject: [PATCH] tests: update tests: fixup --- .../512b/test_different_io_sizes_support.py | 2 +- test/functional/tests/basic/test_basic.py | 18 ++++---- test/functional/tests/basic/test_start_cas.py | 3 +- .../test_cleaning_policy_operation.py | 28 ++++++------ .../cache_ops/test_concurrent_flushes.py | 32 ++++++++------ .../tests/cache_ops/test_core_remove.py | 25 ++++++----- .../test_dynamic_cache_mode_switching.py | 44 +++++++++---------- .../tests/cache_ops/test_multilevel_cache.py | 5 ++- .../cache_ops/test_multistream_seq_cutoff.py | 21 ++++----- .../tests/cache_ops/test_seq_cutoff.py | 25 ++++++----- 10 files changed, 106 insertions(+), 97 deletions(-) diff --git a/test/functional/tests/512b/test_different_io_sizes_support.py b/test/functional/tests/512b/test_different_io_sizes_support.py index fc8c20e82..3d104c243 100644 --- a/test/functional/tests/512b/test_different_io_sizes_support.py +++ b/test/functional/tests/512b/test_different_io_sizes_support.py @@ -36,7 +36,7 @@ def test_support_different_io_size(cache_mode): - No IO errors """ - with TestRun.step("Prepare cache and core"): + with TestRun.step("Prepare cache and core devices"): cache_disk = TestRun.disks["cache"] core_disk = TestRun.disks["core"] cache_disk.create_partitions([Size(1, Unit.GibiByte)]) diff --git a/test/functional/tests/basic/test_basic.py b/test/functional/tests/basic/test_basic.py index 378a617b9..7a7e0c1f6 100644 --- a/test/functional/tests/basic/test_basic.py +++ b/test/functional/tests/basic/test_basic.py @@ -46,13 +46,12 @@ def test_cas_version(): with TestRun.step(f"Read cas version from {version_file_path} location"): file_read = fs_utils.read_file(version_file_path).split("\n") - file_version, *_ = [line for line in file_read if "CAS_VERSION=" in line] - file_cas_version_list = file_version.split("=")[1] + file_cas_version = next( + (line.split("=")[1] for line in file_read if "CAS_VERSION=" in line) + ) with TestRun.step("Compare cmd and file versions"): - if not all( - file_cas_version_list == file_cas_version for file_cas_version in cmd_cas_versions - ): + if not all(file_cas_version == cmd_cas_version for cmd_cas_version in cmd_cas_versions): TestRun.LOGGER.error(f"Cmd and file versions doesn`t match") @@ -69,9 +68,11 @@ def test_negative_start_cache(): - Fails to start cache on another partition with the same id """ - with TestRun.step("Partition cache devices"): + with TestRun.step("Prepare cache and core devices"): cache_dev = TestRun.disks["cache"] + cache_dev.create_partitions([Size(2, Unit.GibiByte)] * 2) + cache_dev_1 = cache_dev.partitions[0] cache_dev_2 = cache_dev.partitions[1] @@ -102,7 +103,6 @@ def test_negative_start_cache(): ) ) TestRun.fail("Two caches started with same ID") - except CmdException: if not check_stderr_msg(output, start_cache_with_existing_id): TestRun.fail(f"Received unexpected error message: {output.stderr}") @@ -125,7 +125,7 @@ def test_data_integrity(filesystem, cache_mode, cache_line_size): - Data consistency is preserved. 
""" - with TestRun.step("Partition cache and core devices"): + with TestRun.step("Prepare cache and core devices"): cache_device = TestRun.disks["cache"] core_device = TestRun.disks["core"] @@ -140,7 +140,7 @@ def test_data_integrity(filesystem, cache_mode, cache_line_size): cache_dev=cache_part, cache_mode=cache_mode, cache_line_size=cache_line_size, force=True ) core = cache.add_core(core_dev=core_part) - + with TestRun.step("Create filesystem on CAS device and mount it"): core.create_filesystem(filesystem) core.mount(mountpoint) diff --git a/test/functional/tests/basic/test_start_cas.py b/test/functional/tests/basic/test_start_cas.py index 4c789e445..d34b6d50f 100644 --- a/test/functional/tests/basic/test_start_cas.py +++ b/test/functional/tests/basic/test_start_cas.py @@ -26,8 +26,9 @@ def test_start_cache_add_core(): """ with TestRun.step("Prepare cache and core devices"): cache_dev = TestRun.disks["cache"] - cache_dev.create_partitions([Size(500, Unit.MebiByte)]) core_dev = TestRun.disks["core"] + + cache_dev.create_partitions([Size(500, Unit.MebiByte)]) core_dev.create_partitions([Size(2, Unit.GibiByte)]) with TestRun.step("Start cache"): diff --git a/test/functional/tests/cache_ops/test_cleaning_policy_operation.py b/test/functional/tests/cache_ops/test_cleaning_policy_operation.py index 6bf2ce803..eb2e5fc86 100644 --- a/test/functional/tests/cache_ops/test_cleaning_policy_operation.py +++ b/test/functional/tests/cache_ops/test_cleaning_policy_operation.py @@ -38,7 +38,7 @@ @pytest.mark.parametrize("cleaning_policy", CleaningPolicy) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_cleaning_policies_in_write_back(cleaning_policy): +def test_cleaning_policies_in_write_back(cleaning_policy: CleaningPolicy): """ title: Test for cleaning policy operation in Write-Back cache mode. description: | @@ -50,10 +50,11 @@ def test_cleaning_policies_in_write_back(cleaning_policy): - Dirty data is flushed or not according to the policy used. 
""" - with TestRun.step("Partition cache and core devices"): + with TestRun.step("Prepare cache and core devices"): cache_dev = TestRun.disks["cache"] - cache_dev.create_partitions([Size(1, Unit.GibiByte)]) core_dev = TestRun.disks["core"] + + cache_dev.create_partitions([Size(1, Unit.GibiByte)]) core_dev.create_partitions([Size(2, Unit.GibiByte)] * cores_count) with TestRun.step("Disable udev"): @@ -62,7 +63,7 @@ def test_cleaning_policies_in_write_back(cleaning_policy): with TestRun.step(f"Start cache in Write-Back mode with {cleaning_policy} cleaning policy"): cache = casadm.start_cache(cache_dev.partitions[0], CacheMode.WB, force=True) cache.set_cleaning_policy(cleaning_policy=cleaning_policy) - set_params(cache, cleaning_policy) + set_cleaning_policy_params(cache, cleaning_policy) with TestRun.step("Check for running CAS cleaner"): output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}") @@ -88,13 +89,13 @@ def test_cleaning_policies_in_write_back(cleaning_policy): fio.run() time.sleep(3) - core_writes_before_wait_for_cleaning = cache.get_statistics().block_stats_cache.core.writes + core_writes_before_wait_for_cleaning = cache.get_statistics().block_stats.core.writes with TestRun.step(f"Wait {time_to_wait} seconds"): time.sleep(time_to_wait) with TestRun.step("Check write statistics for core device"): - core_writes_after_wait_for_cleaning = cache.get_statistics().block_stats_cache.core.writes + core_writes_after_wait_for_cleaning = cache.get_statistics().block_stats.core.writes check_cleaning_policy_operation( cleaning_policy, core_writes_before_wait_for_cleaning, @@ -117,10 +118,11 @@ def test_cleaning_policies_in_write_through(cleaning_policy): - Dirty data is flushed or not according to the policy used. """ - with TestRun.step("Partition cache and core devices"): + with TestRun.step("Prepare cache and core devices"): cache_dev = TestRun.disks["cache"] - cache_dev.create_partitions([Size(1, Unit.GibiByte)]) core_dev = TestRun.disks["core"] + + cache_dev.create_partitions([Size(1, Unit.GibiByte)]) core_dev.create_partitions([Size(2, Unit.GibiByte)] * cores_count) with TestRun.step("Disable udev"): @@ -128,7 +130,7 @@ def test_cleaning_policies_in_write_through(cleaning_policy): with TestRun.step(f"Start cache in Write-Through mode with {cleaning_policy} cleaning policy"): cache = casadm.start_cache(cache_dev.partitions[0], CacheMode.WT, force=True) - set_params(cache, cleaning_policy) + set_cleaning_policy_params(cache, cleaning_policy) with TestRun.step("Check for running CAS cleaner"): output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}") @@ -157,13 +159,13 @@ def test_cleaning_policies_in_write_through(cleaning_policy): with TestRun.step("Change cache mode back to Write-Through"): cache.set_cache_mode(CacheMode.WT, flush=False) - core_writes_before_wait_for_cleaning = cache.get_statistics().block_stats_cache.core.writes + core_writes_before_wait_for_cleaning = cache.get_statistics().block_stats.core.writes with TestRun.step(f"Wait {time_to_wait} seconds"): time.sleep(time_to_wait) with TestRun.step("Check write statistics for core device"): - core_writes_after_wait_for_cleaning = cache.get_statistics().block_stats_cache.core.writes + core_writes_after_wait_for_cleaning = cache.get_statistics().block_stats.core.writes check_cleaning_policy_operation( cleaning_policy, core_writes_before_wait_for_cleaning, @@ -171,11 +173,11 @@ def test_cleaning_policies_in_write_through(cleaning_policy): ) -def set_params(cache, cleaning_policy): +def 
set_cleaning_policy_params(cache, cleaning_policy): current_cleaning_policy = cache.get_cleaning_policy() if current_cleaning_policy != cleaning_policy: TestRun.LOGGER.error( - f"Cleaning policy is {current_cleaning_policy}, " f"should be {cleaning_policy}" + f"Cleaning policy is {current_cleaning_policy}, should be {cleaning_policy}" ) match cleaning_policy: diff --git a/test/functional/tests/cache_ops/test_concurrent_flushes.py b/test/functional/tests/cache_ops/test_concurrent_flushes.py index 1ca01ca0a..4c787dd46 100644 --- a/test/functional/tests/cache_ops/test_concurrent_flushes.py +++ b/test/functional/tests/cache_ops/test_concurrent_flushes.py @@ -21,7 +21,7 @@ @pytest.mark.parametrize("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites)) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeSet([DiskType.hdd, DiskType.hdd4k])) -def test_concurrent_cores_flush(cache_mode): +def test_concurrent_cores_flush(cache_mode: CacheMode): """ title: Fail to flush two cores simultaneously. description: | @@ -34,12 +34,14 @@ def test_concurrent_cores_flush(cache_mode): the same cache simultaneously. """ - with TestRun.step("Prepare cache and core"): + with TestRun.step("Prepare cache and core devices"): cache_dev = TestRun.disks["cache"] - cache_dev.create_partitions([Size(2, Unit.GibiByte)]) - cache_part = cache_dev.partitions[0] core_dev = TestRun.disks["core"] + + cache_dev.create_partitions([Size(2, Unit.GibiByte)]) core_dev.create_partitions([Size(5, Unit.GibiByte)] * 2) + + cache_part = cache_dev.partitions[0] core_part1 = core_dev.partitions[0] core_part2 = core_dev.partitions[1] @@ -64,7 +66,7 @@ def test_concurrent_cores_flush(cache_mode): .target(core.path) .size(core.size) .block_size(Size(4, Unit.MebiByte)) - .read_write(ReadWrite.randwrite) + .read_write(ReadWrite.write) .direct(1) ) fio_pid = fio.run_in_background() @@ -147,7 +149,7 @@ def test_concurrent_cores_flush(cache_mode): @pytest.mark.parametrize("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites)) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_concurrent_caches_flush(cache_mode): +def test_concurrent_caches_flush(cache_mode: CacheMode): """ title: Success to flush two caches simultaneously. description: | @@ -156,16 +158,20 @@ def test_concurrent_caches_flush(cache_mode): - No system crash. - Flush for each cache should finish successfully. 
""" + caches_number = 3 - with TestRun.step("Prepare caches and cores"): + with TestRun.step("Prepare cache and core devices"): cache_dev = TestRun.disks["cache"] - cache_dev.create_partitions([Size(2, Unit.GibiByte)] * 3) core_dev = TestRun.disks["core"] - core_dev.create_partitions([Size(2, Unit.GibiByte) * 2] * 3) - with TestRun.step(f"Start 3 caches"): - caches = [casadm.start_cache(cache_dev=part, cache_mode=cache_mode, force=True) for part in - cache_dev.partitions] + cache_dev.create_partitions([Size(2, Unit.GibiByte)] * caches_number) + core_dev.create_partitions([Size(2, Unit.GibiByte) * 2] * caches_number) + + with TestRun.step(f"Start {caches_number} caches"): + caches = [ + casadm.start_cache(cache_dev=part, cache_mode=cache_mode, force=True) + for part in cache_dev.partitions + ] with TestRun.step("Disable cleaning and sequential cutoff"): for cache in caches: @@ -192,7 +198,7 @@ def test_concurrent_caches_flush(cache_mode): with TestRun.step("Check if each cache is full of dirty blocks"): for cache in caches: - if not cache.get_dirty_blocks() != core.size.set_unit(Unit.Blocks4096): + if not cache.get_dirty_blocks() != core.size: TestRun.fail(f"The cache {cache.cache_id} does not contain dirty blocks") with TestRun.step("Start flush operation on all caches simultaneously"): diff --git a/test/functional/tests/cache_ops/test_core_remove.py b/test/functional/tests/cache_ops/test_core_remove.py index b21273a32..2129e72bb 100644 --- a/test/functional/tests/cache_ops/test_core_remove.py +++ b/test/functional/tests/cache_ops/test_core_remove.py @@ -16,14 +16,14 @@ from test_utils.size import Size, Unit mount_point = "/mnt/cas" +cores_amount = 3 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_remove_core_when_other_mounted_auto_numeration(): """ - title: Test for removing one core from the cache when the other core is mounted. - Cores are numerated automatically. + title: Remove one core when other are mounted - auto-numerated. description: | Test of the ability to remove the unmounted core from the cache when the other core is mounted and its ID starts with a different digit. @@ -32,16 +32,17 @@ def test_remove_core_when_other_mounted_auto_numeration(): - Removing unmounted core finished with success. """ - with TestRun.step("Prepare cache and core"): + with TestRun.step("Prepare cache and core devices"): cache_device = TestRun.disks["cache"] - cache_device.create_partitions([Size(50, Unit.MebiByte)]) core_device = TestRun.disks["core"] - core_device.create_partitions([Size(200, Unit.MebiByte)] * 3) + + cache_device.create_partitions([Size(50, Unit.MebiByte)]) + core_device.create_partitions([Size(200, Unit.MebiByte)] * cores_amount) with TestRun.step("Start cache"): cache = casadm.start_cache(cache_device.partitions[0], force=True) - with TestRun.step("Add cores to cache and mount them except the first one"): + with TestRun.step(f"Add {cores_amount} cores to cache and mount them except the first one"): free_core = cache.add_core(core_device.partitions[0]) mounted_cores = [] for i, part in enumerate(core_device.partitions[1:]): @@ -62,8 +63,7 @@ def test_remove_core_when_other_mounted_auto_numeration(): @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_remove_core_when_other_mounted_custom_numeration(): """ - title: Test for removing one core from the cache when the other core is mounted. - Cores have custom numeration, starting with the same digit. 
+    title: Remove one core when others are mounted - custom numeration.
     description: |
       Test of the ability to remove the unmounted core from the cache when the other core
       is mounted and its ID starts with the same digit.
     pass_criteria:
       - No system crash.
       - Removing unmounted core finished with success.
     """
 
-    with TestRun.step("Prepare devices"):
+    with TestRun.step("Prepare cache and core devices"):
         cache_device = TestRun.disks["cache"]
-        cache_device.create_partitions([Size(50, Unit.MebiByte)])
         core_device = TestRun.disks["core"]
+
+        cache_device.create_partitions([Size(50, Unit.MebiByte)])
         core_device.create_partitions([Size(200, Unit.MebiByte)] * 3)
 
     with TestRun.step("Start cache"):
         cache = casadm.start_cache(cache_device.partitions[0], force=True)
 
-    with TestRun.step("Add cores to cache and mount them except the first one"):
+    with TestRun.step(f"Add {cores_amount} cores to cache and mount all except the first one"):
         random_prefix = random.randint(1, 9)
         random_interfix = random.randint(1, 9)
 
@@ -91,7 +92,7 @@ def test_remove_core_when_other_mounted_custom_numeration():
         for i, part in enumerate(core_device.partitions[1:]):
             part.create_filesystem(Filesystem.xfs)
             mounted_cores.append(
-                cache.add_core(core_dev=part, core_id=f"{random_prefix}{random_interfix}{i}")
+                cache.add_core(core_dev=part, core_id=int(f"{random_prefix}{random_interfix}{i}"))
             )
             mounted_cores[i].mount(
                 mount_point=f"{mount_point}{cache.cache_id}-{mounted_cores[i].core_id}"
diff --git a/test/functional/tests/cache_ops/test_dynamic_cache_mode_switching.py b/test/functional/tests/cache_ops/test_dynamic_cache_mode_switching.py
index 49f23b6bd..bb40b07b5 100644
--- a/test/functional/tests/cache_ops/test_dynamic_cache_mode_switching.py
+++ b/test/functional/tests/cache_ops/test_dynamic_cache_mode_switching.py
@@ -145,10 +145,11 @@ def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mo
     - Cache mode is switched without errors.
     """
 
-    with TestRun.step("Partition cache and core devices"):
+    with TestRun.step("Prepare cache and core devices"):
         cache_dev = TestRun.disks["cache"]
-        cache_dev.create_partitions([Size(1, Unit.GibiByte)])
         core_dev = TestRun.disks["core"]
+
+        cache_dev.create_partitions([Size(1, Unit.GibiByte)])
         core_dev.create_partitions([Size(2, Unit.GibiByte)])
 
     with TestRun.step(f"Start cache in {cache_mode_1} mode"):
@@ -217,13 +218,13 @@ def run_io_and_verify(cache, core, io_mode):
     fio_prepare(core, io_mode).run()
     sync()
     cache_mode = cache.get_cache_mode()
-    cache_stats = cache.get_statistics()
-    core_stats = core.get_statistics()
+    cache_block_stats = cache.get_statistics().block_stats
+    core_block_stats = core.get_statistics().block_stats
     match cache_mode:
         case CacheMode.WB:
             if (
-                core_stats.block_stats_cache.core.writes.value != 0
-                or core_stats.block_stats_cache.exp_obj.writes.value <= 0
+                cache_block_stats.core.writes.value != 0
+                or cache_block_stats.exp_obj.writes.value <= 0
             ):
                 TestRun.fail(
                     "Write-Back cache mode is not working properly! "
@@ -231,15 +232,15 @@ def run_io_and_verify(cache, core, io_mode):
                 )
         case CacheMode.PT:
             if (
-                cache_stats.block_stats_cache.cache.writes.value != 0
-                or cache_stats.block_stats_cache.cache.reads.value != 0
+                cache_block_stats.cache.writes.value != 0
+                or cache_block_stats.cache.reads.value != 0
             ):
                 TestRun.fail(
                     "Pass-Through cache mode is not working properly! 
" "There should be no reads or writes from/to cache" ) case CacheMode.WT: - if cache_stats.block_stats_cache.cache != cache_stats.block_stats_cache.core: + if cache_block_stats.cache != cache_block_stats.core: TestRun.fail( "Write-Through cache mode is not working properly! " "'cache writes' and 'core writes' counts should be the same" @@ -247,15 +248,15 @@ def run_io_and_verify(cache, core, io_mode): case CacheMode.WA: if io_mode == ReadWrite.randread: if ( - cache_stats.block_stats_cache.cache.writes != io_size - or cache_stats.block_stats_cache.core.reads != io_size + cache_block_stats.cache.writes != io_size + or cache_block_stats.core.reads != io_size ): TestRun.fail( "Write-Around cache mode is not working properly for data reads! " "'cache writes' and 'core reads' should equal total data reads" ) if io_mode == ReadWrite.randwrite: - if cache_stats.block_stats_cache.cache.writes != io_size: + if cache_block_stats.cache.writes != io_size: TestRun.fail( "Write-Around cache mode is not working properly for data writes! " "There should be no writes to cache since previous read operation" @@ -263,8 +264,8 @@ def run_io_and_verify(cache, core, io_mode): case CacheMode.WO: if io_mode == ReadWrite.randread: if ( - cache_stats.block_stats_cache.cache.writes.value != 0 - or cache_stats.block_stats_cache.cache.reads.value != 0 + cache_block_stats.cache.writes.value != 0 + or cache_block_stats.cache.reads.value != 0 ): TestRun.fail( "Write-Only cache mode is not working properly for data reads! " @@ -272,8 +273,8 @@ def run_io_and_verify(cache, core, io_mode): ) if io_mode == ReadWrite.randwrite: if ( - core_stats.block_stats_cache.core.writes.value != 0 - or core_stats.block_stats_cache.exp_obj.writes != io_size + core_block_stats.core.writes.value != 0 + or core_block_stats.exp_obj.writes != io_size ): TestRun.fail( "Write-Only cache mode is not working properly for data writes! " @@ -290,8 +291,7 @@ def check_separated_read_write_after_reload(cache, core, cache_mode, io_size): io_mode = ReadWrite.randread fio_prepare(core, io_mode, io_size_after_reload).run() sync() - cache_stats = cache.get_statistics() - cache_block_stats = cache.get_statistics().block_stats_cache + cache_block_stats = cache.get_statistics().block_stats io_new_data = io_size_after_reload - io_size if cache_mode == CacheMode.WA: @@ -306,8 +306,8 @@ def check_separated_read_write_after_reload(cache, core, cache_mode, io_size): ) if cache_mode == CacheMode.WO: if ( - cache_stats.block_stats_cache.cache.writes != Size.zero() - or cache_stats.block_stats_cache.cache.reads != io_size + cache_block_stats.cache.writes != Size.zero() + or cache_block_stats.cache.reads != io_size ): TestRun.fail( "Write-Only cache mode is not working properly for data reads after reload! 
" @@ -319,8 +319,8 @@ def check_separated_read_write_after_reload(cache, core, cache_mode, io_size): io_mode = ReadWrite.randwrite fio_prepare(core, io_mode, io_size_after_reload).run() sync() - cache_block_stats = cache.get_statistics().block_stats_cache - core_block_stats = core.get_statistics().block_stats_cache + cache_block_stats = cache.get_statistics().block_stats + core_block_stats = core.get_statistics().block_stats match cache_mode: case CacheMode.WA: diff --git a/test/functional/tests/cache_ops/test_multilevel_cache.py b/test/functional/tests/cache_ops/test_multilevel_cache.py index cedfa4338..a132273aa 100644 --- a/test/functional/tests/cache_ops/test_multilevel_cache.py +++ b/test/functional/tests/cache_ops/test_multilevel_cache.py @@ -25,10 +25,11 @@ def test_remove_multilevel_core(): - OpenCAS does not allow removing a core used in a multilevel cache instance. """ - with TestRun.step("Partition cache and core devices"): + with TestRun.step("Prepare cache and core devices"): cache_dev = TestRun.disks["cache"] - cache_dev.create_partitions([Size(512, Unit.MebiByte)] * 2) core_dev = TestRun.disks["core"] + + cache_dev.create_partitions([Size(512, Unit.MebiByte)] * 2) core_dev.create_partitions([Size(1, Unit.GibiByte)]) with TestRun.step("Start the first cache instance"): diff --git a/test/functional/tests/cache_ops/test_multistream_seq_cutoff.py b/test/functional/tests/cache_ops/test_multistream_seq_cutoff.py index e66995550..354716f31 100644 --- a/test/functional/tests/cache_ops/test_multistream_seq_cutoff.py +++ b/test/functional/tests/cache_ops/test_multistream_seq_cutoff.py @@ -51,14 +51,12 @@ def test_multistream_seq_cutoff_functional(streams_number, threshold): sequential cutoff threshold """ - with TestRun.step("Prepare cache and core devices"): - cache_disk = TestRun.disks["cache"] - core_disk = TestRun.disks["core"] - with TestRun.step("Disable udev"): Udev.disable() with TestRun.step(f"Start cache in Write-Back"): + cache_disk = TestRun.disks["cache"] + core_disk = TestRun.disks["core"] cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True) core = cache.add_core(core_disk) @@ -110,7 +108,7 @@ def test_multistream_seq_cutoff_functional(streams_number, threshold): for i in TestRun.iteration(range(0, len(offsets))): TestRun.LOGGER.info(f"Statistics before second I/O:\n{core_statistics_before}") - additional_4k_blocks_writes = random.randint(1, int(kib_between_streams / 4)) + additional_4k_blocks_writes = random.randint(1, kib_between_streams // 4) offset = Size(offsets[i], Unit.KibiByte) run_dd( core.path, @@ -146,9 +144,10 @@ def test_multistream_seq_cutoff_stress_raw(streams_seq_rand): - No system crash """ - with TestRun.step("Partition cache and core devices"): + with TestRun.step("Prepare cache and core devices"): cache_disk = TestRun.disks["cache"] core_disk = TestRun.disks["core"] + cache_disk.create_partitions([Size(1.5, Unit.GibiByte)]) with TestRun.step(f"Disable udev"): @@ -213,14 +212,12 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mo - No system crash """ - with TestRun.step("Partition cache and core devices"): - cache_disk = TestRun.disks["cache"] - core_disk = TestRun.disks["core"] - with TestRun.step(f"Disable udev"): Udev.disable() with TestRun.step("Create filesystem on core device"): + cache_disk = TestRun.disks["cache"] + core_disk = TestRun.disks["core"] core_disk.create_filesystem(filesystem) with TestRun.step("Start cache and add core"): @@ -281,8 +278,8 @@ def run_dd(target_path, count, seek): def 
check_statistics(stats_before, stats_after, expected_pt, expected_writes_to_cache): TestRun.LOGGER.info(f"Statistics after I/O:\n{stats_after}") - writes_to_cache_before = stats_before.block_stats_cache.cache.writes - writes_to_cache_after = stats_after.block_stats_cache.cache.writes + writes_to_cache_before = stats_before.block_stats.cache.writes + writes_to_cache_after = stats_after.block_stats.cache.writes pt_writes_before = stats_before.request_stats.pass_through_writes pt_writes_after = stats_after.request_stats.pass_through_writes diff --git a/test/functional/tests/cache_ops/test_seq_cutoff.py b/test/functional/tests/cache_ops/test_seq_cutoff.py index 649323794..a0770adfe 100644 --- a/test/functional/tests/cache_ops/test_seq_cutoff.py +++ b/test/functional/tests/cache_ops/test_seq_cutoff.py @@ -50,16 +50,17 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz - Amount of written blocks to cache is equal to io size run against last core. """ - with TestRun.step("Partition cache and core devices"): + with TestRun.step("Prepare cache and core devices"): cache_device = TestRun.disks["cache"] core_device = TestRun.disks["core"] + cache_device.create_partitions( - [(SEQ_CUTOFF_THRESHOLD_MAX * 4 - + Size(value=5, unit=Unit.GibiByte))] + [(SEQ_CUTOFF_THRESHOLD_MAX * 4 + Size(value=5, unit=Unit.GibiByte))] ) core_device.create_partitions( [(SEQ_CUTOFF_THRESHOLD_MAX + Size(value=10, unit=Unit.GibiByte))] * 4 ) + cache_part = cache_device.partitions[0] core_parts = core_device.partitions @@ -74,7 +75,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz ) core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts] - with TestRun.step(f"Set sequential cut-off parameters for all cores"): + with TestRun.step("Set sequential cut-off parameters for all cores"): writes_before_list = [] fio_additional_size = Size(10, Unit.Blocks4096) thresholds_list = [ @@ -103,14 +104,14 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz fio_job.size(io_size) fio_job.read_write(io_type) fio_job.target(core.path) - writes_before_list.append(core.get_statistics().block_stats_cache.cache.writes) + writes_before_list.append(core.get_statistics().block_stats.cache.writes) with TestRun.step("Prepare random IO against the last core"): fio_job = fio.add_job(f"core_{core_list[-1].core_id}") fio_job.size(io_sizes_list[-1]) fio_job.read_write(io_type_last) fio_job.target(core_list[-1].path) - writes_before_list.append(core_list[-1].get_statistics().block_stats_cache.cache.writes) + writes_before_list.append(core_list[-1].get_statistics().block_stats.cache.writes) with TestRun.step("Run fio against all cores"): fio.run() @@ -233,14 +234,14 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach fio_job.size(io_size) fio_job.read_write(io_type) fio_job.target(core.path) - writes_before_list.append(core.get_statistics().block_stats_cache.cache.writes) + writes_before_list.append(core.get_statistics().block_stats.cache.writes) # Run random IO against the last core fio_job = fio.add_job(job_name=f"core_{core_list[-1].core_id}") fio_job.size(io_sizes_list[-1]) fio_job.read_write(io_type_last) fio_job.target(core_list[-1].path) - writes_before_list.append(core_list[-1].get_statistics().block_stats_cache.cache.writes) + writes_before_list.append(core_list[-1].get_statistics().block_stats.cache.writes) with TestRun.step("Running IO against all cores"): fio.run() @@ -332,7 +333,7 @@ def 
test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type): with TestRun.step("Prepare sequential IO against core"): sync() - writes_before = core.get_statistics().block_stats_cache.cache.writes + writes_before = core.get_statistics().block_stats.cache.writes fio = ( Fio() .create_command() @@ -396,7 +397,7 @@ def test_seq_cutoff_thresh_fill(cache_line_size, io_dir): cache_line_size=cache_line_size, ) core = cache.add_core(core_dev=core_part) - + fio_additional_size = Size(10, Unit.Blocks4096) threshold = Size.generate_random_size( min_size=1, @@ -438,7 +439,7 @@ def test_seq_cutoff_thresh_fill(cache_line_size, io_dir): with TestRun.step(f"Running sequential IO ({io_dir})"): sync() - writes_before = core.get_statistics().block_stats_cache.cache.writes + writes_before = core.get_statistics().block_stats.cache.writes fio = ( Fio() .create_command() @@ -464,7 +465,7 @@ def verify_writes_count( ver_type=VerifyType.NEGATIVE, io_margin=Size(8, Unit.KibiByte), ): - writes_after = core.get_statistics().block_stats_cache.cache.writes + writes_after = core.get_statistics().block_stats.cache.writes writes_difference = writes_after - writes_before match ver_type: case VerifyType.NEGATIVE:
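The sequential cut-off assertions in the tests above all reduce to comparing the
delta of cache writes against the expected IO size. For reference, a minimal
sketch of how the NEGATIVE/POSITIVE branches of verify_writes_count() can be
read, reusing the names from the helper's signature; the exact VerifyType
semantics here are an assumption inferred from the test docstrings, not copied
from the upstream helper:

    # Sketch only -- assumed semantics, not the upstream implementation.
    # NEGATIVE: sequential cut-off engaged, so writes reaching the cache
    # should not exceed the expected size plus a small margin.
    # POSITIVE: cut-off not engaged, so (almost) the whole IO should have
    # been inserted into the cache.
    match ver_type:
        case VerifyType.NEGATIVE:
            if writes_difference > io_size + io_margin:
                TestRun.fail(
                    f"Wrong writes count: {writes_difference}, "
                    f"expected at most {io_size} plus {io_margin} margin"
                )
        case VerifyType.POSITIVE:
            if writes_difference < io_size - io_margin:
                TestRun.fail(
                    f"Wrong writes count: {writes_difference}, "
                    f"expected at least {io_size} minus {io_margin} margin"
                )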