diff --git a/.gitignore b/.gitignore index f44b86e2c..c277861b8 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ *.ko.* *.obj *.a +tags Module.symvers Module.markers *.mod.c diff --git a/Makefile b/Makefile index 2d0b76a7a..054e39eb7 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,6 @@ # # Copyright(c) 2012-2022 Intel Corporation +# Copyright(c) 2024 Huawei Technologies # SPDX-License-Identifier: BSD-3-Clause # @@ -20,6 +21,8 @@ ifneq ($(MAKECMDGOALS),srpm) ifneq ($(MAKECMDGOALS),deb) ifneq ($(MAKECMDGOALS),dsc) cd $@ && $(MAKE) $(MAKECMDGOALS) +casadm: modules + cd $@ && $(MAKE) $(MAKECMDGOALS) endif endif endif diff --git a/casadm/Makefile b/casadm/Makefile index 8ab0c4e87..d010125d3 100644 --- a/casadm/Makefile +++ b/casadm/Makefile @@ -152,7 +152,9 @@ $(OBJDIR)%.o: %.c ifeq ($(strip $(CAS_VERSION_MAIN)),) $(error "No version file") endif - @$(CC) -c $(CFLAGS) -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@:%.o=%.d)" -o "$@" "$<" + @$(CC) -c $(CFLAGS) -MMD -o "$@" "$<" + +-include $(addprefix $(OBJDIR),$(OBJS:.o=.d)) clean: @echo " CLEAN " diff --git a/casadm/argp.c b/casadm/argp.c index 1ad53f207..c27555bcf 100644 --- a/casadm/argp.c +++ b/casadm/argp.c @@ -623,8 +623,8 @@ int args_parse(app *app_values, cli_command *commands, int argc, const char **ar } if (is_command_blocked(commands, i)) { - cas_printf(LOG_ERR, "The command is not supported\n"); - return FAILURE; + cas_printf(LOG_INFO, "The command is not supported\n"); + return SUCCESS; } configure_cli_commands(commands); diff --git a/casadm/cas_main.c b/casadm/cas_main.c index f41fb5a4e..1e361bd5d 100644 --- a/casadm/cas_main.c +++ b/casadm/cas_main.c @@ -2237,7 +2237,7 @@ static cli_command cas_commands[] = { .options = attach_cache_options, .command_handle_opts = start_cache_command_handle_option, .handle = handle_cache_attach, - .flags = CLI_SU_REQUIRED, + .flags = (CLI_SU_REQUIRED | CLI_COMMAND_BLOCKED), .help = NULL, }, { @@ -2247,7 +2247,7 @@ static cli_command cas_commands[] = { .options = 
detach_options, .command_handle_opts = command_handle_option, .handle = handle_cache_detach, - .flags = CLI_SU_REQUIRED, + .flags = (CLI_SU_REQUIRED | CLI_COMMAND_BLOCKED), .help = NULL, }, { diff --git a/casadm/casadm.8 b/casadm/casadm.8 index 3a5d4e95b..16fc0bef1 100644 --- a/casadm/casadm.8 +++ b/casadm/casadm.8 @@ -88,7 +88,7 @@ Stop cache instance. Set runtime parameter for cache/core instance. .TP -.B -G, --set-param +.B -G, --get-param Get runtime parameter for cache/core instance. .TP diff --git a/configure.d/1_append_bio.conf b/configure.d/1_append_bio.conf deleted file mode 100644 index 8c5346389..000000000 --- a/configure.d/1_append_bio.conf +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -# -# Copyright(c) 2012-2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# - -. $(dirname $3)/conf_framework.sh - - -check() { - cur_name=$(basename $2) - config_file_path=$1 - if compile_module $cur_name "struct bio *b;blk_rq_append_bio(NULL, &b);" "linux/blkdev.h" - then - echo $cur_name 1 >> $config_file_path - else - echo $cur_name 2 >> $config_file_path - fi -} - -apply() { - case "$1" in - "1") - add_define "cas_blk_rq_append_bio(rq, bounce_bio) \\ - blk_rq_append_bio(rq, &bounce_bio)" ;; - "2") - add_define "cas_blk_rq_append_bio(rq, bounce_bio) \\ - blk_rq_append_bio(rq, bounce_bio)" ;; - *) - exit 1 - esac -} - -conf_run $@ diff --git a/configure.d/1_bio_iter.conf b/configure.d/1_bio_iter.conf index be16c1313..dfef373fb 100644 --- a/configure.d/1_bio_iter.conf +++ b/configure.d/1_bio_iter.conf @@ -1,6 +1,7 @@ #!/bin/bash # # Copyright(c) 2012-2022 Intel Corporation +# Copyright(c) 2024 Huawei Technologies # SPDX-License-Identifier: BSD-3-Clause # @@ -25,15 +26,11 @@ apply() { "1") add_define "CAS_BIO_BISIZE(bio) \\ bio->bi_iter.bi_size" - add_define "CAS_BIO_BIIDX(bio) \\ - bio->bi_iter.bi_idx" add_define "CAS_BIO_BISECTOR(bio) \\ bio->bi_iter.bi_sector" ;; "2") add_define "CAS_BIO_BISIZE(bio) \\ bio->bi_size" - add_define "CAS_BIO_BIIDX(bio) \\ - 
bio->bi_idx" add_define "CAS_BIO_BISECTOR(bio) \\ bio->bi_sector" ;; *) diff --git a/configure.d/1_blk_end_req.conf b/configure.d/1_blk_end_req.conf deleted file mode 100644 index 91a22bcd1..000000000 --- a/configure.d/1_blk_end_req.conf +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -# -# Copyright(c) 2012-2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# - -. $(dirname $3)/conf_framework.sh - -check() { - cur_name=$(basename $2) - config_file_path=$1 - if compile_module $cur_name "blk_mq_end_request(NULL, 0);" "linux/blk-mq.h" - then - echo $cur_name "1" >> $config_file_path - elif compile_module $cur_name "blk_end_request_all(NULL, 0);" "linux/blkdev.h" - then - echo $cur_name "2" >> $config_file_path - else - echo $cur_name "X" >> $config_file_path - fi -} - -apply() { - case "$1" in - "1") - add_define "CAS_END_REQUEST_ALL blk_mq_end_request" ;; - "2") - add_define "CAS_END_REQUEST_ALL blk_end_request_all" ;; - *) - exit 1 - esac -} - -conf_run $@ diff --git a/configure.d/1_kallsyms_on_each_symbol.conf b/configure.d/1_kallsyms_on_each_symbol.conf deleted file mode 100644 index 419afd44a..000000000 --- a/configure.d/1_kallsyms_on_each_symbol.conf +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -# -# Copyright(c) 2012-2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# - -. 
$(dirname $3)/conf_framework.sh - -check() { - cur_name=$(basename $2) - config_file_path=$1 - if compile_module $cur_name "kallsyms_on_each_symbol(NULL, NULL);" "linux/fs.h" - then - echo $cur_name "1" >> $config_file_path - else - echo $cur_name "2" >> $config_file_path - fi -} - -apply() { - case "$1" in - "1") - add_define "SYMBOL_LOOKUP_SUPPORTED 1" ;; - "2") - ;; - *) - exit 1 - esac -} - -conf_run $@ diff --git a/configure.d/1_queue_bounce.conf b/configure.d/1_queue_bounce.conf deleted file mode 100644 index dc737c1d8..000000000 --- a/configure.d/1_queue_bounce.conf +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# -# Copyright(c) 2012-2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# - -. $(dirname $3)/conf_framework.sh - -check() { - cur_name=$(basename $2) - config_file_path=$1 - if compile_module $cur_name "blk_queue_bounce(NULL, NULL);" "linux/blkdev.h" - then - echo $cur_name "1" >> $config_file_path - else - echo $cur_name "2" >> $config_file_path - fi -} - -apply() { - case "$1" in - "1") - add_define "cas_blk_queue_bounce(q, bounce_bio) \\ - blk_queue_bounce(q, bounce_bio)" ;; - "2") - add_define "cas_blk_queue_bounce(q, bounce_bio) \\ - ({})" ;; - *) - exit 1 - esac -} - -conf_run $@ diff --git a/configure.d/1_queue_limits.conf b/configure.d/1_queue_limits.conf index a9059bc2f..1f9149b77 100644 --- a/configure.d/1_queue_limits.conf +++ b/configure.d/1_queue_limits.conf @@ -49,6 +49,14 @@ apply() { exp_q->limits.max_write_same_sectors = 0; }" + # A workaround for RHEL/CentOS 7.3 bug in kernel. + # Merging implementation on blk-mq does not respect virt boundary + # restriction and front merges bios with non-zero offsets. + # This leads to request with gaps between bios and in consequence + # triggers BUG_ON() in nvme driver or silently corrupts data. + # To prevent this, disable merging on cache queue if there are + # requirements regarding virt boundary (marking bios with REQ_NOMERGE + # does not solve this problem). 
add_function " static inline void cas_cache_set_no_merges_flag(struct request_queue *cache_q) { diff --git a/configure.d/1_queue_lock.conf b/configure.d/1_queue_lock.conf deleted file mode 100644 index 37f77226e..000000000 --- a/configure.d/1_queue_lock.conf +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -# -# Copyright(c) 2012-2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# - -. $(dirname $3)/conf_framework.sh - -check() { - cur_name=$(basename $2) - config_file_path=$1 - if compile_module $cur_name "struct request_queue *q; spin_lock_irq(q->queue_lock);"\ - "linux/blkdev.h" - then - echo $cur_name "1" >> $config_file_path - elif compile_module $cur_name "struct request_queue *q; spin_lock_irq(&q->queue_lock);"\ - "linux/blkdev.h" - then - echo $cur_name "2" >> $config_file_path - else - echo $cur_name "X" >> $config_file_path - fi -} - -apply() { - case "$1" in - "1") - add_define "CAS_QUEUE_SPIN_LOCK(q) spin_lock_irq(q->queue_lock)" - add_define "CAS_QUEUE_SPIN_UNLOCK(q) spin_unlock_irq(q->queue_lock)" ;; - "2") - add_define "CAS_QUEUE_SPIN_LOCK(q) spin_lock_irq(&q->queue_lock)" - add_define "CAS_QUEUE_SPIN_UNLOCK(q) spin_unlock_irq(&q->queue_lock)" ;; - *) - exit 1 - esac -} - -conf_run $@ diff --git a/modules/cas_cache/context.c b/modules/cas_cache/context.c index ed5b1cc29..94eb55a73 100644 --- a/modules/cas_cache/context.c +++ b/modules/cas_cache/context.c @@ -82,7 +82,7 @@ static int _cas_page_get_cpu(struct page *page) /* * */ -static ctx_data_t *__cas_ctx_data_alloc(uint32_t pages, bool zalloc) +static ctx_data_t *__cas_ctx_data_alloc(uint32_t pages) { struct blk_data *data; uint32_t i; @@ -116,14 +116,6 @@ static ctx_data_t *__cas_ctx_data_alloc(uint32_t pages, bool zalloc) if (!data->vec[i].bv_page) break; - if (zalloc) { - if (!page_addr) { - page_addr = page_address( - data->vec[i].bv_page); - } - memset(page_addr, 0, PAGE_SIZE); - } - data->vec[i].bv_len = PAGE_SIZE; data->vec[i].bv_offset = 0; } @@ -153,12 +145,7 @@ static ctx_data_t 
*__cas_ctx_data_alloc(uint32_t pages, bool zalloc) ctx_data_t *cas_ctx_data_alloc(uint32_t pages) { - return __cas_ctx_data_alloc(pages, false); -} - -ctx_data_t *cas_ctx_data_zalloc(uint32_t pages) -{ - return __cas_ctx_data_alloc(pages, true); + return __cas_ctx_data_alloc(pages); } /* diff --git a/modules/cas_cache/context.h b/modules/cas_cache/context.h index 5e9578946..164ea1427 100644 --- a/modules/cas_cache/context.h +++ b/modules/cas_cache/context.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation +* Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ @@ -37,7 +38,7 @@ struct blk_data { /** * @brief CAS IO with which data is associated */ - struct ocf_io *io; + ocf_io_t io; /** * @brief Timestamp of start processing request @@ -69,7 +70,6 @@ struct blk_data *cas_alloc_blk_data(uint32_t size, gfp_t flags); void cas_free_blk_data(struct blk_data *data); ctx_data_t *cas_ctx_data_alloc(uint32_t pages); -ctx_data_t *cas_ctx_data_zalloc(uint32_t pages); void cas_ctx_data_free(ctx_data_t *ctx_data); void cas_ctx_data_secure_erase(ctx_data_t *ctx_data); diff --git a/modules/cas_cache/debug.h b/modules/cas_cache/debug.h index cc7d2b10e..3dac09ff8 100644 --- a/modules/cas_cache/debug.h +++ b/modules/cas_cache/debug.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation +* Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ #ifndef __CASDISK_DEBUG_H__ @@ -12,7 +13,7 @@ printk(KERN_INFO "%s\n", __func__) #define CAS_DEBUG_DISK_TRACE(dsk) \ - printk(KERN_INFO "[%u] %s\n", dsk->id, __func__) + printk(KERN_INFO "[%s] %s\n", dsk->path, __func__) #define CAS_DEBUG_MSG(msg) \ printk(KERN_INFO "%s - %s\n", __func__, msg) @@ -22,8 +23,8 @@ __func__, ##__VA_ARGS__) #define CAS_DEBUG_DISK(dsk, format, ...) \ - printk(KERN_INFO "[%u] %s - "format"\n", \ - dsk->id, \ + printk(KERN_INFO "[%s] %s - "format"\n", \ + dsk->path, \ __func__, ##__VA_ARGS__) #define CAS_DEBUG_ERROR(error, ...) 
\ diff --git a/modules/cas_cache/exp_obj.c b/modules/cas_cache/exp_obj.c index d4b329a07..fce9714d6 100644 --- a/modules/cas_cache/exp_obj.c +++ b/modules/cas_cache/exp_obj.c @@ -37,12 +37,9 @@ static inline void bd_release_from_disk(struct block_device *bdev, #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0) #define KRETURN(x) ({ return (x); }) #define MAKE_RQ_RET_TYPE blk_qc_t -#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) +#else #define KRETURN(x) return #define MAKE_RQ_RET_TYPE void -#else - #define KRETURN(x) ({ return (x); }) - #define MAKE_RQ_RET_TYPE int #endif /* For RHEL 9.x we assume backport from kernel 5.18+ */ diff --git a/modules/cas_cache/exp_obj.h b/modules/cas_cache/exp_obj.h index 1c536578b..be52f5a93 100644 --- a/modules/cas_cache/exp_obj.h +++ b/modules/cas_cache/exp_obj.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation +* Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ #ifndef __CASDISK_EXP_OBJ_H__ @@ -46,9 +47,6 @@ struct cas_exp_obj { struct blk_mq_tag_set tag_set; - atomic_t pt_ios; - atomic_t *pending_rqs; - void *private; }; diff --git a/modules/cas_cache/layer_cache_management.c b/modules/cas_cache/layer_cache_management.c index 15ef0e731..7c11d3a8f 100644 --- a/modules/cas_cache/layer_cache_management.c +++ b/modules/cas_cache/layer_cache_management.c @@ -1075,6 +1075,9 @@ int cache_mngt_core_pool_get_paths(struct kcas_core_pool_path *cmd_info) struct get_paths_ctx visitor_ctx = {0}; int result; + if (cmd_info->core_path_tab == NULL) + return -EINVAL; + visitor_ctx.core_path_name_tab = cmd_info->core_path_tab; visitor_ctx.max_count = cmd_info->core_pool_count; @@ -2058,6 +2061,7 @@ static int _cache_mngt_start_queues(ocf_cache_t cache) cache_priv->mngt_queue, CAS_CPUS_ALL); if (result) { ocf_queue_put(cache_priv->mngt_queue); + cache_priv->mngt_queue = NULL; goto err; } @@ -2534,20 +2538,27 @@ int cache_mngt_attach_device(const char *cache_name, size_t name_len, result = 
ocf_mngt_cache_get_by_name(cas_ctx, cache_name, OCF_CACHE_NAME_SIZE, &cache); - if (result) + if (result) { + ocf_volume_destroy(attach_cfg->device.volume); goto err_get; + } result = _cache_mngt_lock_sync(cache); - if (result) + if (result) { + ocf_volume_destroy(attach_cfg->device.volume); goto err_lock; + } result = cache_mngt_check_bdev(&attach_cfg->device, attach_cfg->force, true, cache); - if (result) + if (result) { + ocf_volume_destroy(attach_cfg->device.volume); goto err_ctx; + } context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) { + ocf_volume_destroy(attach_cfg->device.volume); result = -ENOMEM; goto err_ctx; } @@ -2697,17 +2708,21 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg, switch (cmd->init_cache) { case CACHE_INIT_STANDBY_NEW: case CACHE_INIT_STANDBY_LOAD: + ocf_volume_destroy(attach_cfg->device.volume); printk(KERN_ERR "Standby mode is not supported!\n"); return -ENOTSUP; default: break; } - if (!try_module_get(THIS_MODULE)) + if (!try_module_get(THIS_MODULE)) { + ocf_volume_destroy(attach_cfg->device.volume); return -KCAS_ERR_SYSTEM; + } result = cache_mngt_check_bdev(&attach_cfg->device, attach_cfg->force, false, NULL); if (result) { + ocf_volume_destroy(attach_cfg->device.volume); module_put(THIS_MODULE); return result; } @@ -2719,6 +2734,7 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg, cache_name_meta, &cache_mode_meta, &cache_line_size_meta); if (result) { + ocf_volume_destroy(attach_cfg->device.volume); module_put(THIS_MODULE); return result; } @@ -2729,6 +2745,7 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg, printk(KERN_ERR "Improper cache name format on %s.\n", cmd->cache_path_name); + ocf_volume_destroy(attach_cfg->device.volume); module_put(THIS_MODULE); return -OCF_ERR_START_CACHE_FAIL; } @@ -2741,6 +2758,7 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg, "already exists.\n", cache_name_meta); ocf_mngt_cache_put(tmp_cache); + 
ocf_volume_destroy(attach_cfg->device.volume); module_put(THIS_MODULE); return -OCF_ERR_CACHE_EXIST; } @@ -2755,6 +2773,7 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg, context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) { + ocf_volume_destroy(attach_cfg->device.volume); module_put(THIS_MODULE); return -ENOMEM; } @@ -2764,6 +2783,7 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg, if (IS_ERR(context->rollback_thread)) { result = PTR_ERR(context->rollback_thread); kfree(context); + ocf_volume_destroy(attach_cfg->device.volume); module_put(THIS_MODULE); return result; } @@ -2779,6 +2799,7 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg, if (result) { cas_lazy_thread_stop(context->rollback_thread); kfree(context); + ocf_volume_destroy(attach_cfg->device.volume); module_put(THIS_MODULE); return result; } @@ -2786,12 +2807,12 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg, result = _cache_mngt_cache_priv_init(cache); if (result) - goto err; + goto err_deinit_config; context->priv_inited = true; result = _cache_mngt_start_queues(cache); if (result) - goto err; + goto err_deinit_config; cache_priv = ocf_cache_get_priv(cache); cache_priv->attach_context = context; @@ -2838,6 +2859,9 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg, ocf_mngt_cache_unlock(cache); return result; + +err_deinit_config: + ocf_volume_destroy(attach_cfg->device.volume); err: cmd->min_free_ram = context->min_free_ram; @@ -3196,10 +3220,6 @@ int cache_mngt_exit_instance(const char *cache_name, size_t name_len, int flush) if (status) return status; - cache_priv = ocf_cache_get_priv(cache); - mngt_queue = cache_priv->mngt_queue; - context = cache_priv->stop_context; - /* * Flush cache. Flushing may take a long time, so we allow user * to interrupt this operation. 
Hence we do first flush before @@ -3218,6 +3238,10 @@ int cache_mngt_exit_instance(const char *cache_name, size_t name_len, int flush) if (status) goto put; + cache_priv = ocf_cache_get_priv(cache); + mngt_queue = cache_priv->mngt_queue; + context = cache_priv->stop_context; + context->finish_thread = cas_lazy_thread_create(exit_instance_finish, context, "cas_%s_stop", cache_name); if (IS_ERR(context->finish_thread)) { @@ -3504,22 +3528,6 @@ int cache_mngt_get_core_info(struct kcas_core_info *info) return result; } -static int cache_mngt_wait_for_rq_finish_visitor(ocf_core_t core, void *cntx) -{ - ocf_volume_t obj = ocf_core_get_volume(core); - struct bd_object *bdobj = bd_object(obj); - - while (atomic64_read(&bdobj->pending_rqs)) - io_schedule(); - - return 0; -} - -void cache_mngt_wait_for_rq_finish(ocf_cache_t cache) -{ - ocf_core_visit(cache, cache_mngt_wait_for_rq_finish_visitor, NULL, true); -} - int cache_mngt_set_core_params(struct kcas_set_core_param *info) { ocf_cache_t cache; diff --git a/modules/cas_cache/linux_kernel_version.h b/modules/cas_cache/linux_kernel_version.h index a9682fde0..41eda5b88 100644 --- a/modules/cas_cache/linux_kernel_version.h +++ b/modules/cas_cache/linux_kernel_version.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation +* Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ @@ -47,23 +48,6 @@ #include #endif -#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 0, 0) - #include - #ifdef UTS_UBUNTU_RELEASE_ABI - #define CAS_UBUNTU - #endif -#endif - -/* - * For 8KB process kernel stack check if request is not continous and - * submit each bio as separate request. This prevent nvme driver from - * splitting requests. - * For large requests, nvme splitting causes stack overrun. 
- */ -#if THREAD_SIZE <= 8192 - #define RQ_CHECK_CONTINOUS -#endif - #ifndef SHRT_MIN #define SHRT_MIN ((s16)-32768) #endif @@ -74,16 +58,6 @@ #define ENOTSUP ENOTSUPP -#ifdef RHEL_RELEASE_VERSION - #if RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 3) - #define CAS_RHEL_73 - #endif -#endif - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0) - #define CAS_GARBAGE_COLLECTOR -#endif - /* rate-limited printk */ #define CAS_PRINT_RL(...) \ if (printk_ratelimit()) \ diff --git a/modules/cas_cache/service_ui_ioctl.c b/modules/cas_cache/service_ui_ioctl.c index 0bd8e19ca..567decc6c 100644 --- a/modules/cas_cache/service_ui_ioctl.c +++ b/modules/cas_cache/service_ui_ioctl.c @@ -32,7 +32,7 @@ return map_cas_err_to_generic(ret); \ }) -/* this handles IOctl for /dev/cas */ +/* this handles IOctl for /dev/cas_ctrl */ /*********************************************/ long cas_service_ioctl_ctrl(struct file *filp, unsigned int cmd, unsigned long arg) @@ -86,6 +86,10 @@ long cas_service_ioctl_ctrl(struct file *filp, unsigned int cmd, GET_CMD_INFO(cmd_info, arg); + printk(KERN_ERR "Cache attach is not supported!\n"); + retval = -ENOTSUP; + RETURN_CMD_RESULT(cmd_info, arg, retval); + cache_name_from_id(cache_name, cmd_info->cache_id); retval = cache_mngt_attach_cache_cfg(cache_name, OCF_CACHE_NAME_SIZE, @@ -104,6 +108,9 @@ long cas_service_ioctl_ctrl(struct file *filp, unsigned int cmd, char cache_name[OCF_CACHE_NAME_SIZE]; GET_CMD_INFO(cmd_info, arg); + printk(KERN_ERR "Cache detach is not supported!\n"); + retval = -ENOTSUP; + RETURN_CMD_RESULT(cmd_info, arg, retval); cache_name_from_id(cache_name, cmd_info->cache_id); diff --git a/modules/cas_cache/volume/obj_blk.h b/modules/cas_cache/volume/obj_blk.h index 281446e6d..870698bfa 100644 --- a/modules/cas_cache/volume/obj_blk.h +++ b/modules/cas_cache/volume/obj_blk.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation +* Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ @@ -25,12 +26,6 @@ struct 
bd_object { uint32_t opened_by_bdev : 1; /*!< Opened by supplying bdev manually */ - atomic64_t pending_rqs; - /*!< This fields describes in flight IO requests */ - - struct workqueue_struct *btm_wq; - /*< Workqueue for I/O internally trigerred in bottom vol */ - struct workqueue_struct *expobj_wq; /*< Workqueue for I/O handled by top vol */ diff --git a/modules/cas_cache/volume/vol_blk_utils.c b/modules/cas_cache/volume/vol_blk_utils.c index 6cb0736ac..314413b9b 100644 --- a/modules/cas_cache/volume/vol_blk_utils.c +++ b/modules/cas_cache/volume/vol_blk_utils.c @@ -192,42 +192,6 @@ uint32_t cas_io_iter_zero(struct bio_vec_iter *dst, uint32_t bytes) return zeroed; } -/* - * - */ -int cas_blk_io_set_data(struct ocf_io *io, - ctx_data_t *ctx_data, uint32_t offset) -{ - struct blkio *blkio = cas_io_to_blkio(io); - struct blk_data *data = ctx_data; - - /* Set BIO vector (IO data) and initialize iterator */ - blkio->data = data; - if (blkio->data) { - cas_io_iter_init(&blkio->iter, blkio->data->vec, - blkio->data->size); - - /* Move into specified offset in BIO vector iterator */ - if (offset != cas_io_iter_move(&blkio->iter, offset)) { - /* TODO Log message */ - blkio->error = -ENOBUFS; - return -ENOBUFS; - } - } - - return 0; -} - -/* - * - */ -ctx_data_t *cas_blk_io_get_data(struct ocf_io *io) -{ - struct blkio *blkio = cas_io_to_blkio(io); - - return blkio->data; -} - int cas_blk_open_volume_by_bdev(ocf_volume_t *vol, struct block_device *bdev) { struct bd_object *bdobj; diff --git a/modules/cas_cache/volume/vol_blk_utils.h b/modules/cas_cache/volume/vol_blk_utils.h index ddbad014a..9d8dea12d 100644 --- a/modules/cas_cache/volume/vol_blk_utils.h +++ b/modules/cas_cache/volume/vol_blk_utils.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation +* Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ @@ -9,27 +10,6 @@ #include "obj_blk.h" #include "context.h" -struct blkio { - int error; - atomic_t rq_remaning; - atomic_t 
ref_counter; - int32_t dir; - - struct blk_data *data; /* IO data buffer */ - - /* BIO vector iterator for sending IO */ - struct bio_vec_iter iter; -}; - -static inline struct blkio *cas_io_to_blkio(struct ocf_io *io) -{ - return ocf_io_get_priv(io); -} - -int cas_blk_io_set_data(struct ocf_io *io, ctx_data_t *data, - uint32_t offset); -ctx_data_t *cas_blk_io_get_data(struct ocf_io *io); - int cas_blk_open_volume_by_bdev(ocf_volume_t *vol, struct block_device *bdev); void cas_blk_close_volume(ocf_volume_t vol); diff --git a/modules/cas_cache/volume/vol_block_dev_bottom.c b/modules/cas_cache/volume/vol_block_dev_bottom.c index 31927ba4d..6b6b3cc3f 100644 --- a/modules/cas_cache/volume/vol_block_dev_bottom.c +++ b/modules/cas_cache/volume/vol_block_dev_bottom.c @@ -86,15 +86,15 @@ static uint64_t block_dev_get_byte_length(ocf_volume_t vol) * */ static inline struct bio *cas_bd_io_alloc_bio(struct block_device *bdev, - struct blkio *bdio) + struct bio_vec_iter *iter) { struct bio *bio - = cas_bio_alloc(bdev, GFP_NOIO, cas_io_iter_size_left(&bdio->iter)); + = cas_bio_alloc(bdev, GFP_NOIO, cas_io_iter_size_left(iter)); if (bio) return bio; - if (cas_io_iter_size_left(&bdio->iter) < MAX_LINES_PER_IO) { + if (cas_io_iter_size_left(iter) < MAX_LINES_PER_IO) { /* BIO vector was small, so it was memory * common problem - NO RAM!!! */ @@ -106,251 +106,74 @@ static inline struct bio *cas_bd_io_alloc_bio(struct block_device *bdev, } /* - * + * Returns only flags that are relevant to request's direction. */ -static void cas_bd_io_end(struct ocf_io *io, int error) +static inline uint64_t filter_req_flags(int dir, uint64_t flags) { - struct blkio *bdio = cas_io_to_blkio(io); - - if (error) - bdio->error |= error; - - if (atomic_dec_return(&bdio->rq_remaning)) - return; - - CAS_DEBUG_MSG("Completion"); - - /* Send completion to caller */ - io->end(io, bdio->error); + /* Remove REQ_RAHEAD flag from write request to cache which are a + result of a missed read-head request. 
This flag caused the nvme + driver to send write command with access frequency value that is + reserved */ + if (dir == WRITE) + flags &= ~REQ_RAHEAD; + + return flags; } /* * */ -CAS_DECLARE_BLOCK_CALLBACK(cas_bd_io_end, struct bio *bio, +CAS_DECLARE_BLOCK_CALLBACK(cas_bd_forward_end, struct bio *bio, unsigned int bytes_done, int error) { - struct ocf_io *io; - struct blkio *bdio; - struct bd_object *bdobj; + ocf_forward_token_t token; int err; - BUG_ON(!bio); - BUG_ON(!bio->bi_private); CAS_BLOCK_CALLBACK_INIT(bio); - io = bio->bi_private; - bdobj = bd_object(ocf_io_get_volume(io)); - BUG_ON(!bdobj); + token = (ocf_forward_token_t)bio->bi_private; err = CAS_BLOCK_CALLBACK_ERROR(bio, error); - bdio = cas_io_to_blkio(io); - BUG_ON(!bdio); CAS_DEBUG_TRACE(); if (err == -EOPNOTSUPP && (CAS_BIO_OP_FLAGS(bio) & CAS_BIO_DISCARD)) err = 0; - cas_bd_io_end(io, err); + ocf_forward_end(token, err); bio_put(bio); CAS_BLOCK_CALLBACK_RETURN(); } -static void block_dev_submit_flush(struct ocf_io *io) -{ - struct blkio *blkio = cas_io_to_blkio(io); - struct bd_object *bdobj = bd_object(ocf_io_get_volume(io)); - struct block_device *bdev = bdobj->btm_bd; - struct request_queue *q = bdev_get_queue(bdev); - struct bio *bio = NULL; - - /* Prevent races of completing IO */ - atomic_set(&blkio->rq_remaning, 1); - - if (q == NULL) { - /* No queue, error */ - blkio->error = -EINVAL; - goto out; - } - - if (!CAS_CHECK_QUEUE_FLUSH(q)) { - /* This block device does not support flush, call back */ - goto out; - } - - bio = cas_bio_alloc(bdev, GFP_NOIO, 0); - if (bio == NULL) { - CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for BIO\n"); - blkio->error = -ENOMEM; - goto out; - } - - blkio->dir = io->dir; - - bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_io_end); - CAS_BIO_SET_DEV(bio, bdev); - bio->bi_private = io; - - atomic_inc(&blkio->rq_remaning); - cas_submit_bio(CAS_SET_FLUSH(io->dir), bio); - -out: - cas_bd_io_end(io, blkio->error); -} - -static void 
block_dev_submit_discard(struct ocf_io *io) -{ - struct blkio *blkio = cas_io_to_blkio(io); - struct bd_object *bdobj = bd_object(ocf_io_get_volume(io)); - struct block_device *bd = bdobj->btm_bd; - struct request_queue *q = bdev_get_queue(bd); - struct bio *bio = NULL; - - unsigned int max_discard_sectors, granularity, bio_sects; - int alignment; - sector_t sects, start, end, tmp; - - /* Prevent races of completing IO */ - atomic_set(&blkio->rq_remaning, 1); - - if (!q) { - /* No queue, error */ - blkio->error = -ENXIO; - goto out; - } - - if (!cas_has_discard_support(bd)) { - /* Discard is not supported by bottom device, send completion - * to caller - */ - goto out; - } - - granularity = max(q->limits.discard_granularity >> SECTOR_SHIFT, 1U); - alignment = (bdev_discard_alignment(bd) >> SECTOR_SHIFT) % granularity; - max_discard_sectors = - min(q->limits.max_discard_sectors, UINT_MAX >> SECTOR_SHIFT); - max_discard_sectors -= max_discard_sectors % granularity; - if (unlikely(!max_discard_sectors)) - goto out; - - sects = io->bytes >> SECTOR_SHIFT; - start = io->addr >> SECTOR_SHIFT; - - while (sects) { - bio = cas_bio_alloc(bd, GFP_NOIO, 1); - if (!bio) { - CAS_PRINT_RL(CAS_KERN_ERR "Couldn't allocate memory for BIO\n"); - blkio->error = -ENOMEM; - break; - } - - bio_sects = min_t(sector_t, sects, max_discard_sectors); - end = start + bio_sects; - tmp = end; - if (bio_sects < sects && - sector_div(tmp, granularity) != alignment) { - end = end - alignment; - sector_div(end, granularity); - end = end * granularity + alignment; - bio_sects = end - start; - } - - CAS_BIO_SET_DEV(bio, bd); - CAS_BIO_BISECTOR(bio) = start; - CAS_BIO_BISIZE(bio) = bio_sects << SECTOR_SHIFT; - bio->bi_next = NULL; - bio->bi_private = io; - bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_io_end); - - atomic_inc(&blkio->rq_remaning); - cas_submit_bio(CAS_BIO_DISCARD, bio); - - sects -= bio_sects; - start = end; - - cond_resched(); - } - -out: - cas_bd_io_end(io, blkio->error); -} - 
-static inline bool cas_bd_io_prepare(int *dir, struct ocf_io *io) -{ - struct blkio *bdio = cas_io_to_blkio(io); - - /* Setup DIR */ - bdio->dir = *dir; - - /* Convert CAS direction into kernel values */ - switch (bdio->dir) { - case OCF_READ: - *dir = READ; - break; - - case OCF_WRITE: - *dir = WRITE; - break; - - default: - bdio->error = -EINVAL; - break; - } - - if (!io->bytes) { - /* Don not accept empty request */ - CAS_PRINT_RL(KERN_ERR "Invalid zero size IO\n"); - bdio->error = -EINVAL; - } - - if (bdio->error) - return false; - - return true; -} -/* - * - */ -static void block_dev_submit_io(struct ocf_io *io) +static void block_dev_forward_io(ocf_volume_t volume, + ocf_forward_token_t token, int dir, uint64_t addr, + uint64_t bytes, uint64_t offset) { - struct blkio *bdio = cas_io_to_blkio(io); - struct bd_object *bdobj = bd_object(ocf_io_get_volume(io)); - struct bio_vec_iter *iter = &bdio->iter; - uint64_t addr = io->addr; - uint32_t bytes = io->bytes; - int dir = io->dir; + struct bd_object *bdobj = bd_object(volume); + struct blk_data *data = ocf_forward_get_data(token); + uint64_t flags = ocf_forward_get_flags(token); + int bio_dir = (dir == OCF_READ) ? 
READ : WRITE; + struct bio_vec_iter iter; struct blk_plug plug; - - if (CAS_IS_SET_FLUSH(io->flags)) { - CAS_DEBUG_MSG("Flush request"); - /* It is flush requests handle it */ - block_dev_submit_flush(io); - return; - } + int error = 0; CAS_DEBUG_PARAM("Address = %llu, bytes = %u\n", addr, bytes); - /* Prevent races of completing IO */ - atomic_set(&bdio->rq_remaning, 1); - - if (!cas_bd_io_prepare(&dir, io)) { - CAS_DEBUG_MSG("Invalid request"); - cas_bd_io_end(io, -EINVAL); + cas_io_iter_init(&iter, data->vec, data->size); + if (offset != cas_io_iter_move(&iter, offset)) { + ocf_forward_end(token, -OCF_ERR_INVAL); return; } blk_start_plug(&plug); - - while (cas_io_iter_is_next(iter) && bytes) { + while (cas_io_iter_is_next(&iter) && bytes) { /* Still IO vectors to be sent */ /* Allocate BIO */ - struct bio *bio = cas_bd_io_alloc_bio(bdobj->btm_bd, bdio); + struct bio *bio = cas_bd_io_alloc_bio(bdobj->btm_bd, &iter); if (!bio) { - bdio->error = -ENOMEM; + error = -ENOMEM; break; } @@ -358,15 +181,15 @@ static void block_dev_submit_io(struct ocf_io *io) CAS_BIO_SET_DEV(bio, bdobj->btm_bd); CAS_BIO_BISECTOR(bio) = addr / SECTOR_SIZE; bio->bi_next = NULL; - bio->bi_private = io; - CAS_BIO_OP_FLAGS(bio) |= io->flags; - bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_io_end); + bio->bi_private = (void *)token; + CAS_BIO_OP_FLAGS(bio) |= filter_req_flags(bio_dir, flags); + bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_forward_end); /* Add pages */ - while (cas_io_iter_is_next(iter) && bytes) { - struct page *page = cas_io_iter_current_page(iter); - uint32_t offset = cas_io_iter_current_offset(iter); - uint32_t length = cas_io_iter_current_length(iter); + while (cas_io_iter_is_next(&iter) && bytes) { + struct page *page = cas_io_iter_current_page(&iter); + uint32_t offset = cas_io_iter_current_offset(&iter); + uint32_t length = cas_io_iter_current_length(&iter); int added; if (length > bytes) @@ -385,19 +208,19 @@ static void block_dev_submit_io(struct ocf_io *io) 
addr += added; /* Update BIO vector iterator */ - if (added != cas_io_iter_move(iter, added)) { - bdio->error = -ENOBUFS; + if (added != cas_io_iter_move(&iter, added)) { + error = -ENOBUFS; break; } } - if (bdio->error == 0) { + if (error == 0) { /* Increase IO reference for sending this IO */ - atomic_inc(&bdio->rq_remaning); + ocf_forward_get(token); /* Send BIO */ CAS_DEBUG_MSG("Submit IO"); - cas_submit_bio(dir, bio); + cas_submit_bio(bio_dir, bio); bio = NULL; } else { if (bio) { @@ -409,41 +232,146 @@ static void block_dev_submit_io(struct ocf_io *io) break; } } - blk_finish_plug(&plug); - if (bytes && bdio->error == 0) { + if (bytes && error == 0) { /* Not all bytes sent, mark error */ - bdio->error = -ENOBUFS; + error = -ENOBUFS; } /* Prevent races of completing IO when * there are still child IOs not being send. */ - cas_bd_io_end(io, 0); + ocf_forward_end(token, error); +} + +static void block_dev_forward_flush(ocf_volume_t volume, + ocf_forward_token_t token) +{ + struct bd_object *bdobj = bd_object(volume); + struct request_queue *q = bdev_get_queue(bdobj->btm_bd); + struct bio *bio; + + if (!q) { + /* No queue, error */ + ocf_forward_end(token, -OCF_ERR_INVAL); + return; + } + + if (!CAS_CHECK_QUEUE_FLUSH(q)) { + /* This block device does not support flush, call back */ + ocf_forward_end(token, 0); + return; + } + + bio = cas_bio_alloc(bdobj->btm_bd, GFP_NOIO, 0); + if (!bio) { + CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for BIO\n"); + ocf_forward_end(token, -OCF_ERR_NO_MEM); + return; + } + + CAS_BIO_SET_DEV(bio, bdobj->btm_bd); + bio->bi_private = (void *)token; + bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_forward_end); + + cas_submit_bio(CAS_SET_FLUSH(0), bio); + +} + +static void block_dev_forward_discard(ocf_volume_t volume, + ocf_forward_token_t token, uint64_t addr, uint64_t bytes) +{ + struct bd_object *bdobj = bd_object(volume); + struct request_queue *q = bdev_get_queue(bdobj->btm_bd); + struct bio *bio; + int error = 0; + + 
unsigned int max_discard_sectors, granularity, bio_sects; + int alignment; + sector_t sects, start, end, tmp; + + if (!q) { + /* No queue, error */ + ocf_forward_end(token, -OCF_ERR_INVAL); + return; + } + + if (!cas_has_discard_support(bdobj->btm_bd)) { + /* Discard is not supported by bottom device, send completion + * to caller + */ + ocf_forward_end(token, 0); + return; + } + + granularity = max(q->limits.discard_granularity >> SECTOR_SHIFT, 1U); + alignment = (bdev_discard_alignment(bdobj->btm_bd) >> SECTOR_SHIFT) + % granularity; + max_discard_sectors = + min(q->limits.max_discard_sectors, UINT_MAX >> SECTOR_SHIFT); + max_discard_sectors -= max_discard_sectors % granularity; + if (unlikely(!max_discard_sectors)) { + ocf_forward_end(token, -OCF_ERR_INVAL); + return; + } + + sects = bytes >> SECTOR_SHIFT; + start = addr >> SECTOR_SHIFT; + + while (sects) { + bio = cas_bio_alloc(bdobj->btm_bd, GFP_NOIO, 1); + if (!bio) { + CAS_PRINT_RL(CAS_KERN_ERR "Couldn't allocate memory for BIO\n"); + error = -OCF_ERR_NO_MEM; + break; + } + + bio_sects = min_t(sector_t, sects, max_discard_sectors); + end = start + bio_sects; + tmp = end; + if (bio_sects < sects && + sector_div(tmp, granularity) != alignment) { + end = end - alignment; + sector_div(end, granularity); + end = end * granularity + alignment; + bio_sects = end - start; + } + + CAS_BIO_SET_DEV(bio, bdobj->btm_bd); + CAS_BIO_BISECTOR(bio) = start; + CAS_BIO_BISIZE(bio) = bio_sects << SECTOR_SHIFT; + bio->bi_next = NULL; + bio->bi_private = (void *)token; + bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_forward_end); + + ocf_forward_get(token); + cas_submit_bio(CAS_BIO_DISCARD, bio); + + sects -= bio_sects; + start = end; + + cond_resched(); + } + + ocf_forward_end(token, error); } const struct ocf_volume_properties cas_object_blk_properties = { .name = "Block_Device", - .io_priv_size = sizeof(struct blkio), .volume_priv_size = sizeof(struct bd_object), .caps = { .atomic_writes = 0, /* Atomic writes not supported 
*/ }, .ops = { - .submit_io = block_dev_submit_io, - .submit_flush = block_dev_submit_flush, - .submit_metadata = NULL, - .submit_discard = block_dev_submit_discard, + .forward_io = block_dev_forward_io, + .forward_flush = block_dev_forward_flush, + .forward_discard = block_dev_forward_discard, .open = block_dev_open_object, .close = block_dev_close_object, .get_max_io_size = block_dev_get_max_io_size, .get_length = block_dev_get_byte_length, }, - .io_ops = { - .set_data = cas_blk_io_set_data, - .get_data = cas_blk_io_get_data, - }, .deinit = NULL, }; diff --git a/modules/cas_cache/volume/vol_block_dev_bottom.h b/modules/cas_cache/volume/vol_block_dev_bottom.h index a727f86e1..581e47466 100644 --- a/modules/cas_cache/volume/vol_block_dev_bottom.h +++ b/modules/cas_cache/volume/vol_block_dev_bottom.h @@ -6,7 +6,6 @@ #ifndef __VOL_BLOCK_DEV_BOTTOM_H__ #define __VOL_BLOCK_DEV_BOTTOM_H__ - int block_dev_init(void); #endif /* __VOL_BLOCK_DEV_BOTTOM_H__ */ diff --git a/modules/cas_cache/volume/vol_block_dev_top.c b/modules/cas_cache/volume/vol_block_dev_top.c index 7bb1b176c..be79dc05a 100644 --- a/modules/cas_cache/volume/vol_block_dev_top.c +++ b/modules/cas_cache/volume/vol_block_dev_top.c @@ -189,10 +189,11 @@ static void blkdev_complete_data_master(struct blk_data *master, int error) cas_free_blk_data(master); } -static void blkdev_complete_data(struct ocf_io *io, int error) +static void blkdev_complete_data(ocf_io_t io, void *priv1, void *priv2, + int error) { - struct bio *bio = io->priv1; - struct blk_data *master = io->priv2; + struct bio *bio = priv1; + struct blk_data *master = priv2; struct blk_data *data = ocf_io_get_data(io); ocf_io_put(io); @@ -217,7 +218,7 @@ static int blkdev_handle_data_single(struct bd_object *bvol, struct bio *bio, ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume); struct cache_priv *cache_priv = ocf_cache_get_priv(cache); ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()]; - struct ocf_io *io; + ocf_io_t io; 
struct blk_data *data; uint64_t flags = CAS_BIO_OP_FLAGS(bio); int ret; @@ -317,9 +318,10 @@ static void blkdev_handle_data(struct bd_object *bvol, struct bio *bio) CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(error)); } -static void blkdev_complete_discard(struct ocf_io *io, int error) +static void blkdev_complete_discard(ocf_io_t io, void *priv1, void *priv2, + int error) { - struct bio *bio = io->priv1; + struct bio *bio = priv1; int result = map_cas_err_to_generic(error); CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(result)); @@ -331,7 +333,7 @@ static void blkdev_handle_discard(struct bd_object *bvol, struct bio *bio) ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume); struct cache_priv *cache_priv = ocf_cache_get_priv(cache); ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()]; - struct ocf_io *io; + ocf_io_t io; io = ocf_volume_new_io(bvol->front_volume, queue, CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT, @@ -356,10 +358,11 @@ static void blkdev_handle_bio_noflush(struct bd_object *bvol, struct bio *bio) blkdev_handle_data(bvol, bio); } -static void blkdev_complete_flush(struct ocf_io *io, int error) +static void blkdev_complete_flush(ocf_io_t io, void *priv1, void *priv2, + int error) { - struct bio *bio = io->priv1; - struct bd_object *bvol = io->priv2; + struct bio *bio = priv1; + struct bd_object *bvol = priv2; int result = map_cas_err_to_generic(error); ocf_io_put(io); @@ -370,10 +373,7 @@ static void blkdev_complete_flush(struct ocf_io *io, int error) return; } - if (in_interrupt()) - blkdev_defer_bio(bvol, bio, blkdev_handle_bio_noflush); - else - blkdev_handle_bio_noflush(bvol, bio); + blkdev_defer_bio(bvol, bio, blkdev_handle_bio_noflush); } static void blkdev_handle_flush(struct bd_object *bvol, struct bio *bio) @@ -381,7 +381,7 @@ static void blkdev_handle_flush(struct bd_object *bvol, struct bio *bio) ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume); struct cache_priv *cache_priv = 
ocf_cache_get_priv(cache); ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()]; - struct ocf_io *io; + ocf_io_t io; io = ocf_volume_new_io(bvol->front_volume, queue, 0, 0, OCF_WRITE, 0, CAS_SET_FLUSH(0)); diff --git a/modules/config.mk b/modules/config.mk index 0ac502e00..17c17961b 100644 --- a/modules/config.mk +++ b/modules/config.mk @@ -1,5 +1,6 @@ # # Copyright(c) 2012-2021 Intel Corporation +# Copyright(c) 2024 Huawei Technologies # SPDX-License-Identifier: BSD-3-Clause # @@ -14,7 +15,7 @@ EXTRA_CFLAGS += -DCAS_VERSION_MAIN=$(CAS_VERSION_MAIN) EXTRA_CFLAGS += -DCAS_VERSION_MAJOR=$(CAS_VERSION_MAJOR) EXTRA_CFLAGS += -DCAS_VERSION_MINOR=$(CAS_VERSION_MINOR) EXTRA_CFLAGS += -DCAS_VERSION=\"$(CAS_VERSION)\" -EXTRA_CFLAGS += -O2 -D_FORTIFY_SOURCE=2 -Wformat -Wformat-security +EXTRA_CFLAGS += -Ofast -D_FORTIFY_SOURCE=2 -Wformat -Wformat-security EXTRA_CFLAGS += -I$(M) EXTRA_CFLAGS += -I$(M)/cas_cache diff --git a/ocf b/ocf index 1fbb00de8..c97884878 160000 --- a/ocf +++ b/ocf @@ -1 +1 @@ -Subproject commit 1fbb00de8f40bab42eb5625053a61536c49382cb +Subproject commit c9788487868443dc6d0653ce7fc02690a51b6ff0 diff --git a/test/functional/api/cas/cache.py b/test/functional/api/cas/cache.py index 8efd00119..6087438c5 100644 --- a/test/functional/api/cas/cache.py +++ b/test/functional/api/cas/cache.py @@ -17,7 +17,6 @@ def __init__(self, device: Device, cache_id: int = None) -> None: self.cache_device = device self.cache_id = cache_id if cache_id else self.__get_cache_id() self.__cache_line_size = None - self.metadata_size_on_disk = self.get_metadata_size_on_disk() def __get_cache_id(self) -> int: device_path = self.__get_cache_device_path() diff --git a/test/functional/api/cas/cas_service.py b/test/functional/api/cas/cas_service.py index 5cb02a3c6..6984c80af 100644 --- a/test/functional/api/cas/cas_service.py +++ b/test/functional/api/cas/cas_service.py @@ -44,5 +44,5 @@ def set_cas_service_timeout(timeout: timedelta = timedelta(minutes=30)): def 
clear_cas_service_timeout(): - remove(opencas_drop_in_directory, force=True, recursive=True, ignore_errors=True) + remove(str(opencas_drop_in_directory), force=True, recursive=True, ignore_errors=True) reload_daemon() diff --git a/test/functional/api/cas/casadm.py b/test/functional/api/cas/casadm.py index 6b693a69f..799147a53 100644 --- a/test/functional/api/cas/casadm.py +++ b/test/functional/api/cas/casadm.py @@ -235,10 +235,10 @@ def get_param_cleaning_acp( def set_cache_mode( - cache_mode: CacheMode, cache_id: int, flush=None, shortcut: bool = False + cache_mode: CacheMode, cache_id: int, flush: bool = None, shortcut: bool = False ) -> Output: flush_cache = None - if flush: + if flush is not None: flush_cache = "yes" if flush else "no" output = TestRun.executor.run( set_cache_mode_cmd( @@ -508,7 +508,8 @@ def stop_all_caches() -> None: caches = get_caches() if not caches: return - for cache in caches: + # Running "cache stop" on the reversed list to resolve the multilevel cache stop problem + for cache in reversed(caches): stop_cache(cache_id=cache.cache_id, no_data_flush=True) @@ -516,5 +517,5 @@ def remove_all_detached_cores() -> None: from api.cas.casadm_parser import get_cas_devices_dict devices = get_cas_devices_dict() - for dev in devices["core_pool"]: - TestRun.executor.run(remove_detached_cmd(dev["device"])) + for dev in devices["core_pool"].values(): + TestRun.executor.run(remove_detached_cmd(dev["device_path"])) diff --git a/test/functional/api/cas/casadm_parser.py b/test/functional/api/cas/casadm_parser.py index 33ba03fdd..a21509a8d 100644 --- a/test/functional/api/cas/casadm_parser.py +++ b/test/functional/api/cas/casadm_parser.py @@ -76,6 +76,7 @@ def get_cas_devices_dict() -> dict: for device in device_list: if device["type"] == "cache": cache_id = int(device["id"]) + core_pool = False params = [ ("id", cache_id), ("device_path", device["disk"]), @@ -91,7 +92,7 @@ def get_cas_devices_dict() -> dict: ] if core_pool: params.append(("core_pool", 
device)) - devices["core_pool"][(cache_id, int(device["id"]))] = dict( + devices["core_pool"][device["disk"]] = dict( [(key, value) for key, value in params] ) else: @@ -99,6 +100,9 @@ def get_cas_devices_dict() -> dict: [(key, value) for key, value in params] ) + elif device["type"] == "core pool": + core_pool = True + return devices diff --git a/test/functional/api/cas/cli_help_messages.py b/test/functional/api/cas/cli_help_messages.py index b06c4b897..56c094fcc 100644 --- a/test/functional/api/cas/cli_help_messages.py +++ b/test/functional/api/cas/cli_help_messages.py @@ -274,11 +274,14 @@ r"-o --output-format \ Output format: \{table|csv\}", ] -help_help = [r"Usage: casadm --help", r"Print help"] +help_help = [ + r"Usage: casadm --help", + r"Print help", +] standby_help = [ - r"The command is not supported" + r"The command is not supported", ] zero_metadata_help = [ diff --git a/test/functional/api/cas/cli_messages.py b/test/functional/api/cas/cli_messages.py index 8168016ba..fc3104bcb 100644 --- a/test/functional/api/cas/cli_messages.py +++ b/test/functional/api/cas/cli_messages.py @@ -55,12 +55,12 @@ ] stop_cache_incomplete = [ - r"Error while removing cache \d+", + r"Error while stopping cache \d+", r"Cache is in incomplete state - at least one core is inactive", ] stop_cache_errors = [ - r"Removed cache \d+ with errors", + r"Stopped cache \d+ with errors", r"Error while writing to cache device", ] diff --git a/test/functional/api/cas/core.py b/test/functional/api/cas/core.py index ba2a11329..f298984b1 100644 --- a/test/functional/api/cas/core.py +++ b/test/functional/api/cas/core.py @@ -60,6 +60,7 @@ def get_io_class_statistics( ) -> CoreIoClassStats: return CoreIoClassStats( cache_id=self.cache_id, + core_id=self.core_id, filter=stat_filter, io_class_id=io_class_id, percentage_val=percentage_val, diff --git a/test/functional/api/cas/statistics.py b/test/functional/api/cas/statistics.py index 40142b73a..ca1622e6f 100644 --- 
a/test/functional/api/cas/statistics.py +++ b/test/functional/api/cas/statistics.py @@ -226,14 +226,14 @@ def __init__( class CacheConfigStats: def __init__(self, stats_dict): - self.cache_id = stats_dict["Cache Id"] + self.cache_id = int(stats_dict["Cache Id"]) self.cache_size = parse_value( value=stats_dict["Cache Size [4KiB Blocks]"], unit_type=UnitType.block_4k ) self.cache_dev = stats_dict["Cache Device"] self.exp_obj = stats_dict["Exported Object"] - self.core_dev = stats_dict["Core Devices"] - self.inactive_core_devices = stats_dict["Inactive Core Devices"] + self.core_dev = int(stats_dict["Core Devices"]) + self.inactive_core_devices = int(stats_dict["Inactive Core Devices"]) self.write_policy = stats_dict["Write Policy"] self.cleaning_policy = stats_dict["Cleaning Policy"] self.promotion_policy = stats_dict["Promotion Policy"] @@ -361,6 +361,18 @@ def __init__(self, stats_dict, percentage_val): self.free = parse_value(value=stats_dict[f"Free {unit}"], unit_type=unit) self.clean = parse_value(value=stats_dict[f"Clean {unit}"], unit_type=unit) self.dirty = parse_value(value=stats_dict[f"Dirty {unit}"], unit_type=unit) + if f"Inactive Occupancy {unit}" in stats_dict: + self.inactive_occupancy = parse_value( + value=stats_dict[f"Inactive Occupancy {unit}"], unit_type=unit + ) + if f"Inactive Clean {unit}" in stats_dict: + self.inactive_clean = parse_value( + value=stats_dict[f"Inactive Clean {unit}"], unit_type=unit + ) + if f"Inactive Dirty {unit}" in stats_dict: + self.inactive_dirty = parse_value( + value=stats_dict[f"Inactive Dirty {unit}"], unit_type=unit + ) def __str__(self): return ( diff --git a/test/functional/test-framework b/test/functional/test-framework index 521d1dd4c..42ebe34da 160000 --- a/test/functional/test-framework +++ b/test/functional/test-framework @@ -1 +1 @@ -Subproject commit 521d1dd4c9507bdd8976ac1ddde536fc2d993589 +Subproject commit 42ebe34da3e6e10f823c24db7954675978298fe0 diff --git a/test/functional/tests/basic/test_basic.py 
b/test/functional/tests/basic/test_basic.py index b3bfb1f60..2b77c05a1 100644 --- a/test/functional/tests/basic/test_basic.py +++ b/test/functional/tests/basic/test_basic.py @@ -6,16 +6,16 @@ import pytest +from api.cas import casadm from api.cas.cache_config import CacheMode, CacheLineSize from api.cas.casadm_params import OutputFormat from api.cas.cli import start_cmd -from core.test_run import TestRun -from api.cas import casadm from api.cas.cli_messages import ( check_stderr_msg, start_cache_on_already_used_dev, start_cache_with_existing_id, ) +from core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from test_tools import fs_utils from test_tools.dd import Dd @@ -81,31 +81,31 @@ def test_negative_start_cache(): with TestRun.step("Start cache on the same device but with another ID"): try: - output = TestRun.executor.run( + output = TestRun.executor.run_expect_fail( start_cmd( cache_dev=cache_dev_1.path, cache_id="2", force=True, ) ) - TestRun.fail("Two caches started on same device") - except CmdException: if not check_stderr_msg(output, start_cache_on_already_used_dev): TestRun.fail(f"Received unexpected error message: {output.stderr}") + except CmdException: + TestRun.fail("Two caches started on same device") with TestRun.step("Start cache with the same ID on another cache device"): try: - output = TestRun.executor.run( + output = TestRun.executor.run_expect_fail( start_cmd( cache_dev=cache_dev_2.path, cache_id="1", force=True, ) ) - TestRun.fail("Two caches started with same ID") - except CmdException: if not check_stderr_msg(output, start_cache_with_existing_id): TestRun.fail(f"Received unexpected error message: {output.stderr}") + except CmdException: + TestRun.fail("Two caches started with same ID") @pytest.mark.CI diff --git a/test/functional/tests/cache_ops/test_cleaning_policy_operation.py b/test/functional/tests/cache_ops/test_cleaning_policy_operation.py index 362af7667..be6cf25b4 100644 --- 
a/test/functional/tests/cache_ops/test_cleaning_policy_operation.py +++ b/test/functional/tests/cache_ops/test_cleaning_policy_operation.py @@ -130,6 +130,7 @@ def test_cleaning_policies_in_write_through(cleaning_policy): with TestRun.step(f"Start cache in Write-Through mode with {cleaning_policy} cleaning policy"): cache = casadm.start_cache(cache_dev.partitions[0], CacheMode.WT, force=True) + cache.set_cleaning_policy(cleaning_policy=cleaning_policy) set_cleaning_policy_params(cache, cleaning_policy) with TestRun.step("Check for running CAS cleaner"): @@ -256,7 +257,7 @@ def check_cleaning_policy_operation( case CleaningPolicy.nop: if ( core_writes_after_wait_for_cleaning != Size.zero() - or core_writes_before_wait_for_cleaning.value != Size.zero() + or core_writes_before_wait_for_cleaning != Size.zero() ): TestRun.LOGGER.error( "NOP cleaning policy is not working properly! " diff --git a/test/functional/tests/cli/test_cli_help_and_version.py b/test/functional/tests/cli/test_cli_help_and_version.py index 231597308..24eebe8b3 100644 --- a/test/functional/tests/cli/test_cli_help_and_version.py +++ b/test/functional/tests/cli/test_cli_help_and_version.py @@ -1,5 +1,6 @@ # # Copyright(c) 2020-2022 Intel Corporation +# Copyright(c) 2024 Huawei Technologies Co., Ltd. 
# SPDX-License-Identifier: BSD-3-Clause # @@ -60,7 +61,7 @@ def test_cli_help(shortcut): output = TestRun.executor.run("casadm" + (" -L" if shortcut else " --list-caches") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, list_help) + check_stdout_msg(output, list_caches_help) output = TestRun.executor.run("casadm" + (" -P" if shortcut else " --stats") + (" -H" if shortcut else " --help")) diff --git a/test/functional/tests/cli/test_zero_metadata_command.py b/test/functional/tests/cli/test_zero_metadata_command.py index f3fbf04d1..0fed8af08 100644 --- a/test/functional/tests/cli/test_zero_metadata_command.py +++ b/test/functional/tests/cli/test_zero_metadata_command.py @@ -1,5 +1,6 @@ # # Copyright(c) 2021 Intel Corporation +# Copyright(c) 2024 Huawei Technologies Co., Ltd. # SPDX-License-Identifier: BSD-3-Clause # import time @@ -221,7 +222,7 @@ def test_zero_metadata_dirty_shutdown(): TestRun.LOGGER.info("This could ended with error (expected)") with TestRun.step("Plug cache device."): - cache_disk.plug() + cache_disk.plug_all() time.sleep(1) with TestRun.step("Start cache (expect to fail)."): diff --git a/test/functional/tests/conftest.py b/test/functional/tests/conftest.py index 5e67e6b68..fb3be0dca 100644 --- a/test/functional/tests/conftest.py +++ b/test/functional/tests/conftest.py @@ -243,7 +243,7 @@ def base_prepare(item): except Exception: pass # TODO: Reboot DUT if test is executed remotely - remove(opencas_drop_in_directory, recursive=True, ignore_errors=True) + remove(str(opencas_drop_in_directory), recursive=True, ignore_errors=True) from storage_devices.drbd import Drbd if Drbd.is_installed(): diff --git a/test/functional/tests/data_integrity/test_data_integrity_unplug.py b/test/functional/tests/data_integrity/test_data_integrity_unplug.py index 74d7f398c..3e6ba6c97 100644 --- a/test/functional/tests/data_integrity/test_data_integrity_unplug.py +++ b/test/functional/tests/data_integrity/test_data_integrity_unplug.py @@ -1,5 +1,6 @@ # # 
Copyright(c) 2022 Intel Corporation +# Copyright(c) 2024 Huawei Technologies Co., Ltd. # SPDX-License-Identifier: BSD-3-Clause # @@ -130,7 +131,7 @@ async def test_data_integrity_unplug(cache_mode): raise with TestRun.step("Plug back the cache device"): - cache_dev.plug() + cache_dev.plug_all() with TestRun.step("Load cache"): cache = casadm.load_cache(cache_dev) diff --git a/test/functional/tests/fault_injection/test_fault_injection_many_to_one.py b/test/functional/tests/fault_injection/test_fault_injection_many_to_one.py index 930575e86..0518355d1 100644 --- a/test/functional/tests/fault_injection/test_fault_injection_many_to_one.py +++ b/test/functional/tests/fault_injection/test_fault_injection_many_to_one.py @@ -1,5 +1,6 @@ # # Copyright(c) 2020-2022 Intel Corporation +# Copyright(c) 2024 Huawei Technologies Co., Ltd. # SPDX-License-Identifier: BSD-3-Clause # @@ -227,7 +228,7 @@ def test_one_core_fail(cache_mode): casadm.stop_all_caches() with TestRun.step("Plug back the first core."): - core_dev1.plug() + core_dev1.plug_all() @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @@ -311,7 +312,7 @@ def test_one_core_fail_dirty(): casadm.stop_all_caches() with TestRun.step("Plug back the first core."): - core_dev1.plug() + core_dev1.plug_all() def dd_builder(cache_mode: CacheMode, dev: Core, size: Size): diff --git a/test/functional/tests/fault_injection/test_soft_hot_plug_device.py b/test/functional/tests/fault_injection/test_soft_hot_plug_device.py index 1acaf69fb..31b0c0179 100644 --- a/test/functional/tests/fault_injection/test_soft_hot_plug_device.py +++ b/test/functional/tests/fault_injection/test_soft_hot_plug_device.py @@ -1,5 +1,6 @@ # # Copyright(c) 2019-2022 Intel Corporation +# Copyright(c) 2024 Huawei Technologies Co., Ltd. 
# SPDX-License-Identifier: BSD-3-Clause # @@ -92,10 +93,10 @@ def test_soft_hot_unplug_cache(cache_mode): casadm.stop_all_caches() with TestRun.step("Plug back cache device"): - cache_dev.plug() + cache_dev.plug_all() -@pytest.mark.parametrizex("cache_mode", CacheMode) +@pytest.mark.parametrizex("cache_mode", [CacheMode.WO, CacheMode.WB]) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core1", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core2", DiskTypeLowerThan("cache")) @@ -197,7 +198,7 @@ def test_soft_hot_unplug_core(cache_mode): casadm.stop_all_caches() with TestRun.step("Plug back core device"): - core_dev_unplugged.plug() + core_dev_unplugged.plug_all() def fio_prepare(core): diff --git a/test/functional/tests/incremental_load/test_incremental_load.py b/test/functional/tests/incremental_load/test_incremental_load.py index 0e81764d5..ae6e6b53f 100644 --- a/test/functional/tests/incremental_load/test_incremental_load.py +++ b/test/functional/tests/incremental_load/test_incremental_load.py @@ -205,7 +205,7 @@ def test_attach_core_to_incomplete_cache_volume(): TestRun.fail("Core should be in inactive state.") with TestRun.step("Plug core device."): - plug_device.plug() + plug_device.plug_all() time.sleep(1) with TestRun.step("Check if core status changed to active and CAS device is visible in OS."): @@ -228,6 +228,10 @@ def test_flush_inactive_devices(): - Flushing inactive CAS devices is possible neither by cleaning thread, nor by calling cleaning methods """ + staleness_time = Time(seconds=10) + wake_up_time = Time(seconds=1) + activity_threshold = Time(milliseconds=500) + with TestRun.step("Prepare devices."): devices = prepare_devices([("cache", 1), ("core1", 1), ("core2", 1)]) cache_dev = devices["cache"].partitions[0] @@ -240,9 +244,9 @@ def test_flush_inactive_devices(): cache.set_cleaning_policy(CleaningPolicy.alru) cache.set_params_alru( FlushParametersAlru( - 
staleness_time=Time(seconds=10), - wake_up_time=Time(seconds=1), - activity_threshold=Time(milliseconds=500), + staleness_time=staleness_time, + wake_up_time=wake_up_time, + activity_threshold=activity_threshold, ) ) @@ -307,7 +311,7 @@ def test_flush_inactive_devices(): check_amount_of_dirty_data(dirty_lines_before) with TestRun.step("Plug core disk and verify that this change is reflected on the cache list."): - plug_device.plug() + plug_device.plug_all() time.sleep(1) first_core.wait_for_status_change(CoreStatus.active) cache_status = cache.get_status() @@ -377,7 +381,7 @@ def test_list_cache_and_cache_volumes(): TestRun.fail(f"Cache should be in incomplete state. Actual state: {cache_status}.") with TestRun.step("Plug missing device and stop cache."): - plug_device.plug() + plug_device.plug_all() time.sleep(1) core.wait_for_status_change(CoreStatus.active) cache_status = cache.get_status() @@ -425,7 +429,7 @@ def test_load_cache_with_inactive_core(): cli_messages.check_stderr_msg(output, cli_messages.load_inactive_core_missing) with TestRun.step("Plug missing device and stop cache."): - plug_device.plug() + plug_device.plug_all() time.sleep(1) core.wait_for_status_change(CoreStatus.active) cache_status = cache.get_status() @@ -514,7 +518,7 @@ def test_preserve_data_for_inactive_device(): with TestRun.step( "Plug core disk using sysfs and verify this change is reflected " "on the cache list." 
): - plug_device.plug() + plug_device.plug_all() time.sleep(1) if cache.get_status() != CacheStatus.running or core.get_status() != CoreStatus.active: TestRun.fail( @@ -621,7 +625,8 @@ def test_print_statistics_inactive(cache_mode): check_number_of_inactive_devices(inactive_stats_before, 2) with TestRun.step("Attach one of detached core devices and add it to cache."): - first_plug_device.plug() + first_plug_device.plug_all() + second_plug_device.unplug() time.sleep(1) first_core_status = first_core.get_status() if first_core_status != CoreStatus.active: @@ -639,21 +644,21 @@ def test_print_statistics_inactive(cache_mode): lazy_write_traits = CacheModeTrait.LazyWrites in cache_mode_traits lazy_writes_or_no_insert_write_traits = not insert_write_traits or lazy_write_traits - check_inactive_usage_stats( - inactive_stats_before.inactive_usage_stats.inactive_occupancy, - inactive_stats_after.inactive_usage_stats.inactive_occupancy, + check_usage_stats( + inactive_stats_before.usage_stats.inactive_occupancy, + inactive_stats_after.usage_stats.inactive_occupancy, "inactive occupancy", not insert_write_traits, ) - check_inactive_usage_stats( - inactive_stats_before.inactive_usage_stats.inactive_clean, - inactive_stats_after.inactive_usage_stats.inactive_clean, + check_usage_stats( + inactive_stats_before.usage_stats.inactive_clean, + inactive_stats_after.usage_stats.inactive_clean, "inactive clean", lazy_writes_or_no_insert_write_traits, ) - check_inactive_usage_stats( - inactive_stats_before.inactive_usage_stats.inactive_dirty, - inactive_stats_after.inactive_usage_stats.inactive_dirty, + check_usage_stats( + inactive_stats_before.usage_stats.inactive_dirty, + inactive_stats_after.usage_stats.inactive_dirty, "inactive dirty", not lazy_write_traits, ) @@ -661,7 +666,7 @@ def test_print_statistics_inactive(cache_mode): with TestRun.step("Check statistics per inactive core."): inactive_core_stats = second_core.get_statistics() if ( - 
inactive_stats_after.inactive_usage_stats.inactive_occupancy + inactive_stats_after.usage_stats.inactive_occupancy == inactive_core_stats.usage_stats.occupancy ): TestRun.LOGGER.info( @@ -671,7 +676,7 @@ def test_print_statistics_inactive(cache_mode): TestRun.fail( f"Inactive core occupancy ({inactive_core_stats.usage_stats.occupancy}) " f"should be the same as cache inactive occupancy " - f"({inactive_stats_after.inactive_usage_stats.inactive_occupancy})." + f"({inactive_stats_after.usage_stats.inactive_occupancy})." ) with TestRun.step("Remove inactive core from cache and check if cache is in running state."): @@ -692,7 +697,7 @@ def test_print_statistics_inactive(cache_mode): check_number_of_inactive_devices(cache_stats, 0) with TestRun.step("Plug missing disk and stop cache."): - second_plug_device.plug() + second_plug_device.plug_all() time.sleep(1) cache.stop() @@ -743,7 +748,7 @@ def test_remove_detached_cores(): with TestRun.step("Unplug core device from system and plug it back."): plug_device.unplug() time.sleep(2) - plug_device.plug() + plug_device.plug_all() time.sleep(1) with TestRun.step( @@ -891,7 +896,7 @@ def test_remove_inactive_devices(): core.remove_inactive(force=True) with TestRun.step("Plug missing disk and stop cache."): - plug_device.plug() + plug_device.plug_all() time.sleep(1) casadm.stop_all_caches() @@ -951,7 +956,7 @@ def test_stop_cache_with_inactive_devices(): cache.stop(no_data_flush=True) with TestRun.step("Plug missing core device."): - plug_device.plug() + plug_device.plug_all() time.sleep(1) with TestRun.step("Load cache."): @@ -977,7 +982,7 @@ def test_stop_cache_with_inactive_devices(): with TestRun.step("Stop cache with 'no data flush' option and plug missing core device."): cache.stop(no_data_flush=True) - plug_device.plug() + plug_device.plug_all() # Methods used in tests: @@ -989,7 +994,7 @@ def try_stop_incomplete_cache(cache): cli_messages.check_stderr_msg(e.output, cli_messages.stop_cache_incomplete) -def 
check_inactive_usage_stats(stats_before, stats_after, stat_name, should_be_zero): +def check_usage_stats(stats_before, stats_after, stat_name, should_be_zero): if should_be_zero and stats_before == Size.zero() and stats_after == Size.zero(): TestRun.LOGGER.info(f"{stat_name} value before and after equals 0 as expected.") elif not should_be_zero and stats_after < stats_before: @@ -1001,7 +1006,7 @@ def check_inactive_usage_stats(stats_before, stats_after, stat_name, should_be_z def check_number_of_inactive_devices(stats: CacheStats, expected_num): - inactive_core_num = stats.config_stats.inactive_core_dev + inactive_core_num = stats.config_stats.inactive_core_devices if inactive_core_num != expected_num: TestRun.fail( f"There is wrong number of inactive core devices in cache statistics. " @@ -1011,9 +1016,9 @@ def check_number_of_inactive_devices(stats: CacheStats, expected_num): def check_if_inactive_section_exists(stats, should_exist: bool = True): TestRun.LOGGER.info(str(stats)) - if not should_exist and hasattr(stats, "inactive_usage_stats"): + if not should_exist and "inactive_occupancy" in stats.usage_stats: TestRun.fail("There is an inactive section in cache usage statistics.") - elif should_exist and not hasattr(stats, "inactive_usage_stats"): + elif should_exist and "inactive_occupancy" not in stats.usage_stats: TestRun.fail("There is no inactive section in cache usage statistics.") diff --git a/test/functional/tests/incremental_load/test_udev.py b/test/functional/tests/incremental_load/test_udev.py index 9b49ddb14..48b46ee3d 100644 --- a/test/functional/tests/incremental_load/test_udev.py +++ b/test/functional/tests/incremental_load/test_udev.py @@ -1,5 +1,6 @@ # # Copyright(c) 2020-2021 Intel Corporation +# Copyright(c) 2024 Huawei Technologies Co., Ltd. 
# SPDX-License-Identifier: BSD-3-Clause # @@ -35,15 +36,17 @@ def test_udev_core_partition(): """ cores_count = 4 - with TestRun.step("Create four partitions on core device and one on cache device."): + with TestRun.step("Prepare cache and core devices"): cache_disk = TestRun.disks["cache"] - cache_disk.create_partitions([Size(1, Unit.GibiByte)]) - cache_dev = cache_disk.partitions[0] core_disk = TestRun.disks["core"] + + cache_disk.create_partitions([Size(1, Unit.GibiByte)]) core_disk.create_partitions([Size(2, Unit.GibiByte)] * cores_count) + + cache_dev = cache_disk.partitions[0] core_devices = core_disk.partitions - with TestRun.step("Start cache and add created partitions as cores."): + with TestRun.step("Start cache and add cores"): cache = casadm.start_cache(cache_dev, force=True) for dev in core_devices: cache.add_core(dev) @@ -58,7 +61,7 @@ def test_udev_core_partition(): core_disk.unplug() with TestRun.step("Plug missing core disk."): - core_disk.plug() + core_disk.plug_all() time.sleep(1) with TestRun.step("List cache devices and check that created partitions are present " @@ -82,37 +85,42 @@ def test_udev_core(): - Core devices are listed in core pool when cache is not available - Core devices are moved from core pool and attached to cache after plugging cache device """ - with TestRun.step("Start cache and add core."): + + with TestRun.step("Prepare cache and core devices"): cache_disk = TestRun.disks["cache"] - cache_disk.create_partitions([Size(1, Unit.GibiByte)]) - cache_dev = cache_disk.partitions[0] core_disk = TestRun.disks["core"] + + cache_disk.create_partitions([Size(1, Unit.GibiByte)]) core_disk.create_partitions([Size(2, Unit.GibiByte)]) + + cache_dev = cache_disk.partitions[0] core_dev = core_disk.partitions[0] + + with TestRun.step("Start cache and add core"): cache = casadm.start_cache(cache_dev, force=True) core = cache.add_core(core_dev) - with TestRun.step("Create init config from running CAS configuration."): + with 
TestRun.step("Create init config from running CAS configuration"): InitConfig.create_init_config_from_running_configuration() - with TestRun.step("Stop cache."): + with TestRun.step("Stop cache"): cache.stop() - with TestRun.step("Unplug core disk."): + with TestRun.step("Unplug core disk"): core_disk.unplug() - with TestRun.step("Plug core disk."): - core_disk.plug() + with TestRun.step("Plug core disk"): + core_disk.plug_all() time.sleep(1) - with TestRun.step("Check if core device is listed in core pool."): + with TestRun.step("Check if core device is listed in core pool"): check_if_dev_in_core_pool(core_dev) with TestRun.step("Unplug cache disk."): cache_disk.unplug() with TestRun.step("Plug cache disk."): - cache_disk.plug() + cache_disk.plug_all() with TestRun.step("Check if core device is active and not in the core pool."): check_if_dev_in_core_pool(core_dev, False) @@ -199,7 +207,7 @@ def test_udev_cache_load(cache_mode): cache_disk.unplug() with TestRun.step("Plug cache disk."): - cache_disk.plug() + cache_disk.plug_all() time.sleep(1) with TestRun.step("List caches and check if cache is loaded."): @@ -263,7 +271,7 @@ def test_neg_udev_cache_load(): with TestRun.step("Unplug and plug cache disk."): cache_disk.unplug() - cache_disk.plug() + cache_disk.plug_all() time.sleep(1) with TestRun.step("Check if CAS is loaded correctly."): @@ -274,7 +282,7 @@ def test_neg_udev_cache_load(): if len(cas_devices["caches"]) != 1: TestRun.LOGGER.error(f"There is wrong number of caches. 
Expected: 1, actual: " f"{len(cas_devices['caches'])}") - elif cas_devices["caches"][1]["device"] != cache_disk.partitions[0].path or \ + elif cas_devices["caches"][1]["device_path"] != cache_disk.partitions[0].path or \ CacheStatus[(cas_devices["caches"][1]["status"]).lower()] != CacheStatus.running: TestRun.LOGGER.error(f"Cache did not load properly: {cas_devices['caches'][1]}") if len(cas_devices["cores"]) != 2: @@ -285,14 +293,14 @@ def test_neg_udev_cache_load(): for i in first_cache_core_numbers: correct_core_devices.append(core_disk.partitions[i].path) for core in cas_devices["cores"].values(): - if core["device"] not in correct_core_devices or \ + if core["device_path"] not in correct_core_devices or \ CoreStatus[core["status"].lower()] != CoreStatus.active or \ core["cache_id"] != 1: TestRun.LOGGER.error(f"Core did not load correctly: {core}.") with TestRun.step("Unplug and plug core disk."): core_disk.unplug() - core_disk.plug() + core_disk.plug_all() time.sleep(1) with TestRun.step("Check if two cores assigned to not loaded cache are inserted to core pool."): @@ -304,14 +312,16 @@ def test_neg_udev_cache_load(): for i in range(0, cores_count): if i not in first_cache_core_numbers: core_pool_expected_devices.append(core_disk.partitions[i].path) - for c in cas_devices["core_pool"]: - if c["device"] not in core_pool_expected_devices: + core_pool = cas_devices["core_pool"] + for c in core_pool.values(): + if c["device_path"] not in core_pool_expected_devices: TestRun.LOGGER.error(f"Wrong core device added to core pool: {c}.") def check_if_dev_in_core_pool(dev, should_be_in_core_pool=True): cas_devices_dict = casadm_parser.get_cas_devices_dict() - is_in_core_pool = any(dev.path == d["device"] for d in cas_devices_dict["core_pool"]) + is_in_core_pool = any(dev.path == d["device_path"] + for d in cas_devices_dict["core_pool"].values()) if not (should_be_in_core_pool ^ is_in_core_pool): TestRun.LOGGER.info(f"Core device {dev.path} is" f"{'' if 
should_be_in_core_pool else ' not'} listed in core pool " diff --git a/test/functional/tests/initialize/test_clean_reboot.py b/test/functional/tests/initialize/test_clean_reboot.py index 8006b3d91..182145ef3 100644 --- a/test/functional/tests/initialize/test_clean_reboot.py +++ b/test/functional/tests/initialize/test_clean_reboot.py @@ -1,9 +1,10 @@ # # Copyright(c) 2020-2022 Intel Corporation +# Copyright(c) 2024 Huawei Technologies Co., Ltd. # SPDX-License-Identifier: BSD-3-Clause # -import os +import posixpath import pytest from api.cas import casadm @@ -17,9 +18,6 @@ from test_utils.size import Size, Unit -mount_point = "/mnt/test" - - @pytest.mark.os_dependent @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @@ -29,52 +27,66 @@ @pytest.mark.require_plugin("power_control") def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem): """ - title: Planned system shutdown test. - description: Test for data consistency after clean system shutdown. - pass_criteria: - - DUT should reboot successfully. - - Checksum of file on core device should be the same before and after reboot. + title: Planned system shutdown test. + description: | + Test for data consistency after clean system shutdown. + pass_criteria: + - DUT reboot successful. + - Checksum of file on core device should be the same before and after reboot. 
""" - with TestRun.step("Prepare CAS device."): - cache_disk = TestRun.disks['cache'] + mount_point = "/mnt/test" + + with TestRun.step("Prepare cache and core devices"): + cache_disk = TestRun.disks["cache"] + core_dev = TestRun.disks["core"] + cache_disk.create_partitions([Size(1, Unit.GibiByte)]) + cache_dev = cache_disk.partitions[0] - core_dev = TestRun.disks['core'] + + with TestRun.step("Start cache and add core"): cache = casadm.start_cache(cache_dev, cache_mode, force=True) core = cache.add_core(core_dev) + + with TestRun.step("Create filesystem on the core device and mount it"): core.create_filesystem(filesystem, blocksize=int(Size(1, Unit.Blocks4096))) core.mount(mount_point) - with TestRun.step("Create file on cache and count its checksum."): - test_file = File(os.path.join(mount_point, "test_file")) - Dd()\ - .input("/dev/zero")\ - .output(test_file.full_path)\ - .block_size(Size(1, Unit.KibiByte))\ - .count(1024)\ - .run() + with TestRun.step("Create file on exported object"): + test_file = File(posixpath.join(mount_point, "test_file")) + + dd = ( + Dd() + .input("/dev/zero") + .output(test_file.full_path) + .block_size(Size(1, Unit.KibiByte)) + .count(1024) + ) + dd.run() + + with TestRun.step("Calculate test file md5sums before reboot"): test_file.refresh_item() test_file_md5 = test_file.md5sum() sync() drop_caches(DropCachesMode.ALL) - with TestRun.step("Reset platform."): + with TestRun.step("Reset platform"): if reboot_type == "soft": TestRun.executor.reboot() else: - power_control = TestRun.plugin_manager.get_plugin('power_control') + power_control = TestRun.plugin_manager.get_plugin("power_control") power_control.power_cycle() - with TestRun.step("Load cache."): + with TestRun.step("Load cache and mount core"): casadm.load_cache(cache_dev) core.mount(mount_point) - with TestRun.step("Check file md5sum."): + with TestRun.step("Compare test file md5sums"): test_file.refresh_item() if test_file_md5 != test_file.md5sum(): 
TestRun.LOGGER.error("Checksums does not match - file is corrupted.") else: TestRun.LOGGER.info("File checksum is correct.") - with TestRun.step("Remove test file."): + with TestRun.step("Remove test file"): test_file.remove() diff --git a/test/functional/tests/initialize/test_initialize_status.py b/test/functional/tests/initialize/test_initialize_status.py index 5e06d3fab..7f24192ff 100644 --- a/test/functional/tests/initialize/test_initialize_status.py +++ b/test/functional/tests/initialize/test_initialize_status.py @@ -1,9 +1,10 @@ # # Copyright(c) 2020-2021 Intel Corporation +# Copyright(c) 2024 Huawei Technologies Co., Ltd. # SPDX-License-Identifier: BSD-3-Clause # -import time +import time import pytest from api.cas import cas_module, casctl @@ -15,27 +16,28 @@ @pytest.mark.os_dependent def test_init_status(): """ - title: CAS management device status - description: | - Verify that CAS management device is present in OS only when CAS modules are loaded. - pass_criteria: - - CAS management device present in OS when CAS modules are loaded. - - CAS management device not present in OS when CAS modules are not loaded. + title: CAS management device status + description: | + Verify that CAS management device is present in OS only when CAS modules are loaded. + pass_criteria: + - CAS management device present in OS when CAS modules are loaded. + - CAS management device not present in OS when CAS modules are not loaded. 
""" - with TestRun.step("Check if CAS management device is present in OS."): + + with TestRun.step("Check if CAS management device is present in OS"): time.sleep(5) if cas_module.is_cas_management_dev_present(): TestRun.LOGGER.info("CAS management device is present in OS when CAS module is loaded.") else: TestRun.fail("CAS management device is not present in OS when CAS module is loaded.") - with TestRun.step("Remove CAS module."): + with TestRun.step("Remove CAS module"): cas_module.unload_all_cas_modules() - with TestRun.step("Stop CAS service."): + with TestRun.step("Stop CAS service"): casctl.stop() - with TestRun.step("Check if CAS management device is not present in OS."): + with TestRun.step("Check if CAS management device is not present in OS"): time.sleep(5) if not cas_module.is_cas_management_dev_present(): TestRun.LOGGER.info( @@ -43,7 +45,6 @@ def test_init_status(): else: TestRun.fail("CAS management device is present in OS when CAS module is not loaded.") - with TestRun.step("Load CAS modules and start CAS service."): + with TestRun.step("Load CAS modules and start CAS service"): os_utils.load_kernel_module(CasModule.cache.value) - os_utils.load_kernel_module(CasModule.disk.value) casctl.start() diff --git a/test/functional/tests/lazy_writes/recovery/test_recovery_unplug.py b/test/functional/tests/lazy_writes/recovery/test_recovery_unplug.py index e212b91ea..f77606683 100644 --- a/test/functional/tests/lazy_writes/recovery/test_recovery_unplug.py +++ b/test/functional/tests/lazy_writes/recovery/test_recovery_unplug.py @@ -1,5 +1,6 @@ # # Copyright(c) 2019-2021 Intel Corporation +# Copyright(c) 2024 Huawei Technologies Co., Ltd. 
# SPDX-License-Identifier: BSD-3-Clause # @@ -89,7 +90,7 @@ def test_recovery_unplug_cache_fs(cache_mode, cls, filesystem, direct): with TestRun.step("Plug missing cache device."): TestRun.LOGGER.info(str(casadm.list_caches(by_id_path=False))) - cache_disk.plug() + cache_disk.plug_all() with TestRun.step("Load cache."): cache = casadm.load_cache(cache_device) @@ -174,7 +175,7 @@ def test_recovery_unplug_cache_raw(cache_mode, cls): with TestRun.step("Plug missing cache device."): TestRun.LOGGER.info(str(casadm.list_caches(by_id_path=False))) - cache_disk.plug() + cache_disk.plug_all() with TestRun.step("Load cache."): cache = casadm.load_cache(cache_device) diff --git a/test/functional/tests/security/fuzzy/kernel/fuzzy_stop_remove/test_fuzzy_remove_inactive_cache_id.py b/test/functional/tests/security/fuzzy/kernel/fuzzy_stop_remove/test_fuzzy_remove_inactive_cache_id.py index a4b22180d..28ae91955 100644 --- a/test/functional/tests/security/fuzzy/kernel/fuzzy_stop_remove/test_fuzzy_remove_inactive_cache_id.py +++ b/test/functional/tests/security/fuzzy/kernel/fuzzy_stop_remove/test_fuzzy_remove_inactive_cache_id.py
@@ -86,7 +86,7 @@ def test_fuzzy_remove_inactive_cache_id( ) if output.exit_code == 0: with TestRun.step("Reload cache with inactive core"): - core_disk.plug() + core_disk.plug_all() cache.add_core(core_dev=core_disk) InitConfig.create_init_config_from_running_configuration() cache.stop(no_data_flush=True) diff --git a/test/functional/tests/security/fuzzy/kernel/fuzzy_stop_remove/test_fuzzy_remove_inactive_core_id.py b/test/functional/tests/security/fuzzy/kernel/fuzzy_stop_remove/test_fuzzy_remove_inactive_core_id.py index f35abdd08..986502483 100644 --- a/test/functional/tests/security/fuzzy/kernel/fuzzy_stop_remove/test_fuzzy_remove_inactive_core_id.py +++ b/test/functional/tests/security/fuzzy/kernel/fuzzy_stop_remove/test_fuzzy_remove_inactive_core_id.py @@ -86,7 +86,7 @@ def test_fuzzy_remove_inactive_core_id( ) if output.exit_code == 0: with TestRun.step("Reload cache with inactive core"): - core_disk.plug() + core_disk.plug_all() cache.add_core(core_dev=core_disk) InitConfig.create_init_config_from_running_configuration() cache.stop(no_data_flush=True) diff --git a/tools/cas_version_gen.sh b/tools/cas_version_gen.sh index aa33fceb0..0badf68dc 100755 --- a/tools/cas_version_gen.sh +++ b/tools/cas_version_gen.sh @@ -1,6 +1,7 @@ #!/bin/bash # # Copyright(c) 2020-2021 Intel Corporation +# Copyright(c) 2024 Huawei Technologies # SPDX-License-Identifier: BSD-3-Clause # @@ -30,13 +31,19 @@ if [[ -d "$SOURCES_DIR/.git" ]] && which git &>/dev/null &&\ if [[ ! -r "$MANUAL_VERSION_INPUT" ]]; then error "can't read version input file '$MANUAL_VERSION_INPUT'" fi - . "$MANUAL_VERSION_INPUT" + source "$MANUAL_VERSION_INPUT" if [[ ! "$CAS_VERSION_MAIN" || ! "$CAS_VERSION_MAJOR" || ! 
"$CAS_VERSION_MINOR" ]]; then error "'$MANUAL_VERSION_INPUT' - wrong version input file format;"\ "file should contain CAS_VERSION_MAIN, CAS_VERSION_MAJOR and CAS_VERSION_MINOR"\ "variables along with their respective values" fi + # Make sure version numbers are interpreted by bash as decimal numbers in case any of + # them were being input with leading zeros, which is interpreted as an octal by default. + CAS_VERSION_MAIN=$((10#$CAS_VERSION_MAIN)) + CAS_VERSION_MAJOR=$((10#$CAS_VERSION_MAJOR)) + CAS_VERSION_MINOR=$((10#$CAS_VERSION_MINOR)) + CAS_VERSION_BUILD=$(cd "$SOURCES_DIR" && git log --merges --oneline | wc -l) LAST_COMMIT_HASH=$(cd "$SOURCES_DIR" && git log -1 --pretty=format:%H) LAST_COMMIT_HASH_ABBR=$(cd "$SOURCES_DIR" && git log -1 --pretty=format:%h) @@ -80,7 +87,7 @@ if [[ -d "$SOURCES_DIR/.git" ]] && which git &>/dev/null &&\ echo "FILE_CREATION_DATE=$FILE_CREATION_DATE" >> "$VERSION_FILE" echo "FILE_CREATION_TIMESTAMP=$FILE_CREATION_TIMESTAMP" >> "$VERSION_FILE" elif [[ -r "$VERSION_FILE" ]]; then - . "$VERSION_FILE" >/dev/null + source "$VERSION_FILE" >/dev/null if [[ ! "$CAS_VERSION" ]]; then error "'$VERSION_FILE' - wrong version file format; file does not contain CAS_VERSION" fi