diff --git a/casadm/cas_lib.c b/casadm/cas_lib.c index 8889ed852..27cc0ba16 100644 --- a/casadm/cas_lib.c +++ b/casadm/cas_lib.c @@ -45,20 +45,8 @@ #define CORE_ADD_MAX_TIMEOUT 30 -#define CHECK_IF_CACHE_IS_MOUNTED -1 - -/** - * @brief Routine verifies if filesystem is currently mounted for given cache/core - * - * If FAILURE is returned, reason for failure is printed onto - * standard error. - * @param cache_id cache id of filesystem (to verify if it is mounted) - * @param core_id core id of filesystem (to verify if it is mounted); if this - * parameter is set to negative value, it is only checked if any core belonging - * to given cache is mounted; - * @return SUCCESS if is not mounted; FAILURE if filesystem is mounted - */ -int check_if_mounted(int cache_id, int core_id); +int is_cache_mounted(int cache_id); +int is_core_mounted(int cache_id, int core_id); /* KCAS_IOCTL_CACHE_CHECK_DEVICE wrapper */ int _check_cache_device(const char *device_path, @@ -808,10 +796,6 @@ struct cache_device *get_cache_device(const struct kcas_cache_info *info, bool b cache->promotion_policy = info->info.promotion_policy; cache->size = info->info.cache_line_size; - if ((info->info.state & (1 << ocf_cache_state_running)) == 0) { - return cache; - } - for (cache->core_count = 0; cache->core_count < info->info.core_count; ++cache->core_count) { core_id = info->core_id[cache->core_count]; @@ -957,16 +941,13 @@ int check_cache_already_added(const char *cache_device) { return SUCCESS; } -int start_cache(uint16_t cache_id, unsigned int cache_init, - const char *cache_device, ocf_cache_mode_t cache_mode, - ocf_cache_line_size_t line_size, int force) +static int _verify_and_parse_volume_path(char *tgt_buf, + size_t tgt_buf_size, const char *cache_device, + size_t paths_size) { int fd = 0; - struct kcas_start_cache cmd; - int status; - double min_free_ram_gb; - /* check if cache device given exists */ + /* check if cache device exists */ fd = open(cache_device, 0); if (fd < 0) { 
cas_printf(LOG_ERR, "Device %s not found.\n", cache_device); @@ -974,25 +955,50 @@ int start_cache(uint16_t cache_id, unsigned int cache_init, } close(fd); + if (set_device_path(tgt_buf, tgt_buf_size, cache_device, paths_size) != SUCCESS) { + return FAILURE; + } + + return SUCCESS; +} + +static int _start_cache(uint16_t cache_id, unsigned int cache_init, + const char *cache_device, ocf_cache_mode_t cache_mode, + ocf_cache_line_size_t line_size, int force, bool start) +{ + int fd = 0; + struct kcas_start_cache cmd = {}; + int status; + int ioctl = start ? KCAS_IOCTL_START_CACHE : KCAS_IOCTL_ATTACH_CACHE; + double min_free_ram_gb; + fd = open_ctrl_device(); if (fd == -1) return FAILURE; - memset(&cmd, 0, sizeof(cmd)); - - cmd.cache_id = cache_id; - cmd.init_cache = cache_init; - if (set_device_path(cmd.cache_path_name, sizeof(cmd.cache_path_name), - cache_device, MAX_STR_LEN) != SUCCESS) { + status = _verify_and_parse_volume_path( + cmd.cache_path_name, + sizeof(cmd.cache_path_name), + cache_device, + MAX_STR_LEN); + if (status != SUCCESS) { close(fd); return FAILURE; } + + cmd.cache_id = cache_id; cmd.caching_mode = cache_mode; cmd.line_size = line_size; cmd.force = (uint8_t)force; + cmd.init_cache = cache_init; - status = run_ioctl_interruptible_retry(fd, KCAS_IOCTL_START_CACHE, &cmd, - "Starting cache", cache_id, OCF_CORE_ID_INVALID); + status = run_ioctl_interruptible_retry( + fd, + ioctl, + &cmd, + start ? "Starting cache" : "Attaching device to cache", + cache_id, + OCF_CORE_ID_INVALID); cache_id = cmd.cache_id; if (status < 0) { close(fd); @@ -1002,9 +1008,11 @@ int start_cache(uint16_t cache_id, unsigned int cache_init, min_free_ram_gb /= GiB; cas_printf(LOG_ERR, "Not enough free RAM.\n" - "You need at least %0.2fGB to start cache" + "You need at least %0.2fGB to %s cache" " with cache line size equal %llukB.\n", - min_free_ram_gb, line_size / KiB); + min_free_ram_gb, + start ? 
"start" : "attach a device to", + line_size / KiB); if (64 * KiB > line_size) cas_printf(LOG_ERR, "Try with greater cache line size.\n"); @@ -1025,48 +1033,132 @@ int start_cache(uint16_t cache_id, unsigned int cache_init, check_cache_state_incomplete(cache_id, fd); close(fd); - cas_printf(LOG_INFO, "Successfully added cache instance %u\n", cache_id); + cas_printf(LOG_INFO, "Successfully %s %u\n", + start ? "added cache instance" : "attached device to cache", + cache_id); return SUCCESS; } -int stop_cache(uint16_t cache_id, int flush) +int start_cache(uint16_t cache_id, unsigned int cache_init, + const char *cache_device, ocf_cache_mode_t cache_mode, + ocf_cache_line_size_t line_size, int force) +{ + return _start_cache(cache_id, cache_init, cache_device, cache_mode, + line_size, force, true); +} + +int attach_cache(uint16_t cache_id, const char *cache_device, int force) +{ + return _start_cache(cache_id, CACHE_INIT_NEW, cache_device, + ocf_cache_mode_none, ocf_cache_line_size_none, force, false); +} + +int detach_cache(uint16_t cache_id) { int fd = 0; - struct kcas_stop_cache cmd; + struct kcas_stop_cache cmd = {}; + int ioctl_code = KCAS_IOCTL_DETACH_CACHE; + int status; - /* don't even attempt ioctl if filesystem is mounted */ - if (check_if_mounted(cache_id, CHECK_IF_CACHE_IS_MOUNTED) == FAILURE) { + fd = open_ctrl_device(); + if (fd == -1) return FAILURE; + + cmd.cache_id = cache_id; + cmd.flush_data = true; + + status = run_ioctl_interruptible_retry( + fd, + ioctl_code, + &cmd, + "Detaching the device from cache", + cache_id, + OCF_CORE_ID_INVALID); + close(fd); + + if (status < 0) { + if (OCF_ERR_FLUSHING_INTERRUPTED == cmd.ext_err_code) { + cas_printf(LOG_ERR, + "You have interrupted detaching the device " + "from cache %d. 
CAS continues to operate " + "normally.\n", + cache_id + ); + return INTERRUPTED; + } else if (OCF_ERR_WRITE_CACHE == cmd.ext_err_code) { + cas_printf(LOG_ERR, + "Detached the device from cache %d " + "with errors\n", + cache_id + ); + print_err(cmd.ext_err_code); + return FAILURE; + } else { + cas_printf(LOG_ERR, + "Error while detaching the device from" + " cache %d\n", + cache_id + ); + print_err(cmd.ext_err_code); + return FAILURE; + } } + cas_printf(LOG_INFO, "Successfully detached device from cache %hu\n", + cache_id); + + return SUCCESS; +} + +int stop_cache(uint16_t cache_id, int flush) +{ + int fd = 0; + struct kcas_stop_cache cmd = {}; + int ioctl_code = KCAS_IOCTL_STOP_CACHE; + int status; + + /* Don't stop instance with mounted filesystem */ + if (is_cache_mounted(cache_id) == FAILURE) + return FAILURE; + fd = open_ctrl_device(); if (fd == -1) return FAILURE; - memset(&cmd, 0, sizeof(cmd)); cmd.cache_id = cache_id; cmd.flush_data = flush; - if(run_ioctl_interruptible_retry(fd, KCAS_IOCTL_STOP_CACHE, &cmd, "Stopping cache", - cache_id, OCF_CORE_ID_INVALID) < 0) { - close(fd); + status = run_ioctl_interruptible_retry( + fd, + ioctl_code, + &cmd, + "Stopping cache", + cache_id, + OCF_CORE_ID_INVALID); + close(fd); + + if (status < 0) { if (OCF_ERR_FLUSHING_INTERRUPTED == cmd.ext_err_code) { - cas_printf(LOG_ERR, "You have interrupted stopping of cache. CAS continues\n" - "to operate normally. If you want to stop cache without fully\n" - "flushing dirty data, use '-n' option.\n"); + cas_printf(LOG_ERR, + "You have interrupted stopping of cache %d. " + "CAS continues\nto operate normally. 
The cache" + " can be stopped without\nflushing dirty data " + "by using '-n' option.\n", cache_id); return INTERRUPTED; - } else if (cmd.ext_err_code == OCF_ERR_WRITE_CACHE){ - cas_printf(LOG_ERR, "Removed cache %d with errors\n", cache_id); + } else if (OCF_ERR_WRITE_CACHE == cmd.ext_err_code){ + cas_printf(LOG_ERR, "Stopped cache %d with errors\n", cache_id); print_err(cmd.ext_err_code); return FAILURE; } else { - cas_printf(LOG_ERR, "Error while removing cache %d\n", cache_id); + cas_printf(LOG_ERR, "Error while stopping cache %d\n", cache_id); print_err(cmd.ext_err_code); return FAILURE; } } - close(fd); + + cas_printf(LOG_INFO, "Successfully stopped cache %hu\n", cache_id); + return SUCCESS; } @@ -1711,7 +1803,7 @@ int add_core(unsigned int cache_id, unsigned int core_id, const char *core_devic return SUCCESS; } -int check_if_mounted(int cache_id, int core_id) +int _check_if_mounted(int cache_id, int core_id) { FILE *mtab; struct mntent *mstruct; @@ -1755,6 +1847,16 @@ int check_if_mounted(int cache_id, int core_id) } +int is_cache_mounted(int cache_id) +{ + return _check_if_mounted(cache_id, -1); +} + +int is_core_mounted(int cache_id, int core_id) +{ + return _check_if_mounted(cache_id, core_id); +} + int remove_core(unsigned int cache_id, unsigned int core_id, bool detach, bool force_no_flush) { @@ -1762,7 +1864,7 @@ int remove_core(unsigned int cache_id, unsigned int core_id, struct kcas_remove_core cmd; /* don't even attempt ioctl if filesystem is mounted */ - if (SUCCESS != check_if_mounted(cache_id, core_id)) { + if (SUCCESS != is_core_mounted(cache_id, core_id)) { return FAILURE; } @@ -1828,7 +1930,7 @@ int remove_inactive_core(unsigned int cache_id, unsigned int core_id, struct kcas_remove_inactive cmd; /* don't even attempt ioctl if filesystem is mounted */ - if (SUCCESS != check_if_mounted(cache_id, core_id)) { + if (SUCCESS != is_core_mounted(cache_id, core_id)) { return FAILURE; } @@ -2692,6 +2794,7 @@ int list_caches(unsigned int list_format, 
bool by_id_path) char cache_ctrl_dev[MAX_STR_LEN] = "-"; float cache_flush_prog; float core_flush_prog; + bool cache_device_detached; if (!by_id_path && !curr_cache->standby_detached) { if (get_dev_path(curr_cache->device, curr_cache->device, @@ -2723,11 +2826,16 @@ int list_caches(unsigned int list_format, bool by_id_path) } } + cache_device_detached = + ((curr_cache->state & (1 << ocf_cache_state_standby)) | + (curr_cache->state & (1 << ocf_cache_state_detached))) + ; + fprintf(intermediate_file[1], TAG(TREE_BRANCH) "%s,%u,%s,%s,%s,%s\n", "cache", /* type */ curr_cache->id, /* id */ - curr_cache->standby_detached ? "-" : curr_cache->device, /* device path */ + cache_device_detached ? "-" : curr_cache->device, /* device path */ tmp_status, /* cache status */ mode_string, /* write policy */ cache_ctrl_dev /* device */); diff --git a/casadm/cas_lib.h b/casadm/cas_lib.h index 8144376d2..9d8c48d99 100644 --- a/casadm/cas_lib.h +++ b/casadm/cas_lib.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation +* Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ @@ -116,6 +117,9 @@ int start_cache(uint16_t cache_id, unsigned int cache_init, ocf_cache_line_size_t line_size, int force); int stop_cache(uint16_t cache_id, int flush); +int detach_cache(uint16_t cache_id); +int attach_cache(uint16_t cache_id, const char *cache_device, int force); + #ifdef WI_AVAILABLE #define CAS_CLI_HELP_START_CACHE_MODES "wt|wb|wa|pt|wi|wo" #define CAS_CLI_HELP_SET_CACHE_MODES "wt|wb|wa|pt|wi|wo" diff --git a/casadm/cas_main.c b/casadm/cas_main.c index e6d656139..f41fb5a4e 100644 --- a/casadm/cas_main.c +++ b/casadm/cas_main.c @@ -332,6 +332,13 @@ static cli_option start_options[] = { {0} }; +static cli_option attach_cache_options[] = { + {'d', "cache-device", CACHE_DEVICE_DESC, 1, "DEVICE", CLI_OPTION_REQUIRED}, + {'i', "cache-id", CACHE_ID_DESC_LONG, 1, "ID", CLI_OPTION_REQUIRED}, + {'f', "force", "Force attaching the cache device"}, + {0} +}; + static int 
check_fs(const char* device, bool force) { char cache_dev_path[MAX_STR_LEN]; @@ -405,6 +412,20 @@ int validate_cache_path(const char* path, bool force) return SUCCESS; } +int handle_cache_attach(void) +{ + return attach_cache( + command_args_values.cache_id, + command_args_values.cache_device, + command_args_values.force + ); +} + +int handle_cache_detach(void) +{ + return detach_cache(command_args_values.cache_id); +} + int handle_start() { int status; @@ -527,6 +548,11 @@ static cli_option stop_options[] = { {0} }; +static cli_option detach_options[] = { + {'i', "cache-id", CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, + {0} +}; + int handle_stop() { return stop_cache(command_args_values.cache_id, @@ -2204,6 +2230,26 @@ static cli_command cas_commands[] = { .flags = CLI_SU_REQUIRED, .help = NULL, }, + { + .name = "attach-cache", + .desc = "Attach cache device", + .long_desc = NULL, + .options = attach_cache_options, + .command_handle_opts = start_cache_command_handle_option, + .handle = handle_cache_attach, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "detach-cache", + .desc = "Detach cache device", + .long_desc = NULL, + .options = detach_options, + .command_handle_opts = command_handle_option, + .handle = handle_cache_detach, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, { .name = "stop-cache", .short_name = 'T', diff --git a/casadm/extended_err_msg.c b/casadm/extended_err_msg.c index 431bfa01a..264e36e84 100644 --- a/casadm/extended_err_msg.c +++ b/casadm/extended_err_msg.c @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation +* Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ @@ -178,6 +179,10 @@ struct { OCF_ERR_CACHE_LINE_SIZE_MISMATCH, "Cache line size mismatch" }, + { + OCF_ERR_CACHE_DETACHED, + "The operation is not permited while the cache is detached" + }, { OCF_ERR_CACHE_STANDBY, "The operation is not permited while the cache is in the standby mode" @@ -239,6 +244,11 @@ struct { "Device 
contains partitions.\nIf you want to continue, " "please use --force option.\nWarning: all data will be lost!" }, + { + KCAS_ERR_DEVICE_PROPERTIES_MISMATCH, + "The new device's properties doesn't match the original cache device's" + " properties" + }, { KCAS_ERR_A_PART, "Formatting of partition is unsupported." diff --git a/casadm/table.c b/casadm/table.c index 506e220e0..efbc5dd5b 100644 --- a/casadm/table.c +++ b/casadm/table.c @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2021 Intel Corporation +* Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ #include @@ -124,6 +125,8 @@ int table_set(struct table *t, int y, int x, char *c) int len = strnlen(c, MAX_STR_LEN); if (len >= MAX_STR_LEN) { return 1; + } else if (len == 0) { + return 0; } /* step 1: ensure that space for row y is allocated */ diff --git a/configure.d/1_queue_limits.conf b/configure.d/1_queue_limits.conf index 34beead27..a9059bc2f 100644 --- a/configure.d/1_queue_limits.conf +++ b/configure.d/1_queue_limits.conf @@ -1,6 +1,7 @@ #!/bin/bash # # Copyright(c) 2012-2022 Intel Corporation +# Copyright(c) 2024 Huawei Technologies # SPDX-License-Identifier: BSD-3-Clause # @@ -10,6 +11,7 @@ check() { cur_name=$(basename $2) config_file_path=$1 + if compile_module $cur_name "struct queue_limits q; q.limits_aux;" "linux/blkdev.h" then echo $cur_name "1" >> $config_file_path @@ -24,9 +26,9 @@ check() { elif compile_module $cur_name "struct queue_limits q; q.max_write_same_sectors;" "linux/blkdev.h" then echo $cur_name "4" >> $config_file_path - else - echo $cur_name "X" >> $config_file_path - fi + else + echo $cur_name "X" >> $config_file_path + fi } apply() { @@ -34,53 +36,73 @@ apply() { "1") add_function " static inline void cas_copy_queue_limits(struct request_queue *exp_q, - struct request_queue *cache_q, struct request_queue *core_q) + struct queue_limits *cache_q_limits, struct request_queue *core_q) { struct queue_limits_aux *l_aux = exp_q->limits.limits_aux; - exp_q->limits = 
cache_q->limits; + exp_q->limits = *cache_q_limits; exp_q->limits.limits_aux = l_aux; - if (exp_q->limits.limits_aux && cache_q->limits.limits_aux) - *exp_q->limits.limits_aux = *cache_q->limits.limits_aux; + if (exp_q->limits.limits_aux && cache_q_limits->limits_aux) + *exp_q->limits.limits_aux = *cache_q_limits->limits_aux; exp_q->limits.max_sectors = core_q->limits.max_sectors; exp_q->limits.max_hw_sectors = core_q->limits.max_hw_sectors; exp_q->limits.max_segments = core_q->limits.max_segments; exp_q->limits.max_write_same_sectors = 0; + }" + + add_function " + static inline void cas_cache_set_no_merges_flag(struct request_queue *cache_q) + { if (queue_virt_boundary(cache_q)) queue_flag_set(QUEUE_FLAG_NOMERGES, cache_q); }" ;; "2") add_function " static inline void cas_copy_queue_limits(struct request_queue *exp_q, - struct request_queue *cache_q, struct request_queue *core_q) + struct queue_limits *cache_q_limits, struct request_queue *core_q) { - exp_q->limits = cache_q->limits; + exp_q->limits = *cache_q_limits; exp_q->limits.max_sectors = core_q->limits.max_sectors; exp_q->limits.max_hw_sectors = core_q->limits.max_hw_sectors; exp_q->limits.max_segments = core_q->limits.max_segments; exp_q->limits.max_write_same_sectors = 0; exp_q->limits.max_write_zeroes_sectors = 0; + }" + + add_function " + static inline void cas_cache_set_no_merges_flag(struct request_queue *cache_q) + { }" ;; "3") add_function " static inline void cas_copy_queue_limits(struct request_queue *exp_q, - struct request_queue *cache_q, struct request_queue *core_q) + struct queue_limits *cache_q_limits, struct request_queue *core_q) { - exp_q->limits = cache_q->limits; + exp_q->limits = *cache_q_limits; exp_q->limits.max_sectors = core_q->limits.max_sectors; exp_q->limits.max_hw_sectors = core_q->limits.max_hw_sectors; exp_q->limits.max_segments = core_q->limits.max_segments; exp_q->limits.max_write_zeroes_sectors = 0; + }" + + add_function " + static inline void 
cas_cache_set_no_merges_flag(struct request_queue *cache_q) + { }" ;; "4") add_function " static inline void cas_copy_queue_limits(struct request_queue *exp_q, - struct request_queue *cache_q, struct request_queue *core_q) + struct queue_limits *cache_q_limits, struct request_queue *core_q) { - exp_q->limits = cache_q->limits; + exp_q->limits = *cache_q_limits; exp_q->limits.max_sectors = core_q->limits.max_sectors; exp_q->limits.max_hw_sectors = core_q->limits.max_hw_sectors; exp_q->limits.max_segments = core_q->limits.max_segments; exp_q->limits.max_write_same_sectors = 0; + }" + + add_function " + static inline void cas_cache_set_no_merges_flag(struct request_queue *cache_q) + { }" ;; diff --git a/modules/cas_cache/cas_cache.h b/modules/cas_cache/cas_cache.h index ac2768111..efc78b200 100644 --- a/modules/cas_cache/cas_cache.h +++ b/modules/cas_cache/cas_cache.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation + * Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ @@ -70,6 +71,11 @@ struct cache_priv { ocf_queue_t mngt_queue; void *attach_context; bool cache_exp_obj_initialized; + struct { + struct queue_limits queue_limits; + bool fua; + bool flush; + } device_properties; ocf_queue_t io_queues[]; }; diff --git a/modules/cas_cache/layer_cache_management.c b/modules/cas_cache/layer_cache_management.c index 0e88ffde1..15ef0e731 100644 --- a/modules/cas_cache/layer_cache_management.c +++ b/modules/cas_cache/layer_cache_management.c @@ -1271,7 +1271,10 @@ static int cache_mngt_update_core_uuid(ocf_cache_t cache, const char *core_name, if (result) return result; - return _cache_mngt_save_sync(cache); + if (ocf_cache_is_device_attached(cache)) + result = _cache_mngt_save_sync(cache); + + return result; } static void _cache_mngt_log_core_device_path(ocf_core_t core) @@ -1717,7 +1720,12 @@ int cache_mngt_set_partitions(const char *cache_name, size_t name_len, if (ocf_cache_is_standby(cache)) { result = -OCF_ERR_CACHE_STANDBY; - 
goto out_standby; + goto out_not_running; + } + + if (!ocf_cache_is_device_attached(cache)) { + result = -OCF_ERR_CACHE_DETACHED; + goto out_not_running; } for (class_id = 0; class_id < OCF_USER_IO_CLASS_MAX; class_id++) { @@ -1752,7 +1760,7 @@ int cache_mngt_set_partitions(const char *cache_name, size_t name_len, while (class_id--) cas_cls_rule_destroy(cache, cls_rule[class_id]); } -out_standby: +out_not_running: ocf_mngt_cache_put(cache); out_get: kfree(io_class_cfg); @@ -1868,6 +1876,39 @@ static int cache_mngt_create_cache_device_cfg( return 0; } +int cache_mngt_attach_cache_cfg(char *cache_name, size_t name_len, + struct ocf_mngt_cache_config *cfg, + struct ocf_mngt_cache_attach_config *attach_cfg, + struct kcas_start_cache *cmd) +{ + int result; + + if (!cmd) + return -OCF_ERR_INVAL; + + memset(cfg, 0, sizeof(*cfg)); + memset(attach_cfg, 0, sizeof(*attach_cfg)); + + result = cache_mngt_create_cache_device_cfg(&attach_cfg->device, + cmd->cache_path_name); + if (result) + return result; + + //TODO maybe attach should allow to change cache line size? 
+ //cfg->cache_line_size = cmd->line_size; + cfg->use_submit_io_fast = !use_io_scheduler; + cfg->locked = true; + cfg->metadata_volatile = true; + + cfg->backfill.max_queue_size = max_writeback_queue_size; + cfg->backfill.queue_unblock_size = writeback_queue_unblock_size; + attach_cfg->cache_line_size = cmd->line_size; + attach_cfg->force = cmd->force; + attach_cfg->discard_on_start = true; + + return 0; +} + static void cache_mngt_destroy_cache_device_cfg( struct ocf_mngt_cache_device_config *cfg) { @@ -2048,7 +2089,6 @@ static void init_instance_complete(struct _cache_mngt_attach_context *ctx, } - static void calculate_min_ram_size(ocf_cache_t cache, struct _cache_mngt_attach_context *ctx) { @@ -2078,6 +2118,30 @@ static void calculate_min_ram_size(ocf_cache_t cache, printk(KERN_WARNING "Cannot calculate amount of DRAM needed\n"); } +static void _cache_mngt_attach_complete(ocf_cache_t cache, void *priv, + int error) +{ + struct _cache_mngt_attach_context *ctx = priv; + int caller_status; + char *path; + + cache_mngt_destroy_cache_device_cfg(&ctx->device_cfg); + + if (!error) { + path = (char *)ocf_volume_get_uuid(ocf_cache_get_volume( + cache))->data; + printk(KERN_INFO "Successfully attached %s\n", path); + } + + caller_status = _cache_mngt_async_callee_set_result(&ctx->async, error); + if (caller_status != -KCAS_ERR_WAITING_INTERRUPTED) + return; + + kfree(ctx); + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); +} + static void _cache_mngt_start_complete(ocf_cache_t cache, void *priv, int error) { struct _cache_mngt_attach_context *ctx = priv; @@ -2202,6 +2266,42 @@ static int _cache_mngt_probe_metadata(char *cache_path_name, return result; } +static void volume_set_no_merges_flag_helper(ocf_cache_t cache) +{ + struct request_queue *cache_q; + struct bd_object *bvol; + struct block_device *bd; + ocf_volume_t volume; + + volume = ocf_cache_get_volume(cache); + if (!volume) + return; + + bvol = bd_object(volume); + bd = cas_disk_get_blkdev(bvol->dsk); 
+ cache_q = bd->bd_disk->queue; + + cas_cache_set_no_merges_flag(cache_q); +} + +static void _cache_save_device_properties(ocf_cache_t cache) +{ + struct block_device *bd; + struct bd_object *bvol; + struct request_queue *cache_q; + struct cache_priv *cache_priv = ocf_cache_get_priv(cache); + + bvol = bd_object(ocf_cache_get_volume(cache)); + bd = cas_disk_get_blkdev(bvol->dsk); + cache_q = bd->bd_disk->queue; + + cache_priv->device_properties.queue_limits = cache_q->limits; + cache_priv->device_properties.flush = + CAS_CHECK_QUEUE_FLUSH(cache_q); + cache_priv->device_properties.fua = + CAS_CHECK_QUEUE_FUA(cache_q); +} + static int _cache_start_finalize(ocf_cache_t cache, int init_mode, bool activate) { @@ -2219,6 +2319,10 @@ static int _cache_start_finalize(ocf_cache_t cache, int init_mode, return result; } ctx->cls_inited = true; + + volume_set_no_merges_flag_helper(cache); + + _cache_save_device_properties(cache); } if (activate) @@ -2258,14 +2362,21 @@ static int _cache_start_finalize(ocf_cache_t cache, int init_mode, } static int cache_mngt_check_bdev(struct ocf_mngt_cache_device_config *cfg, - bool force) + bool force, bool reattach, ocf_cache_t cache) { char holder[] = "CAS START\n"; cas_bdev_handle_t bdev_handle; struct block_device *bdev; int part_count; bool is_part; + bool reattach_properties_diff = false; + struct cache_priv *cache_priv; const struct ocf_volume_uuid *uuid = ocf_volume_get_uuid(cfg->volume); + /* The only reason to use blk_stack_limits() is checking compatibility of + * the new device with the original cache. But since the functions modifies + * content of queue_limits, we use copy of the original struct. 
+ */ + struct queue_limits tmp_limits; bdev_handle = cas_bdev_open_by_path(uuid->data, (CAS_BLK_MODE_EXCL | CAS_BLK_MODE_READ), holder); @@ -2278,12 +2389,48 @@ static int cache_mngt_check_bdev(struct ocf_mngt_cache_device_config *cfg, is_part = (cas_bdev_whole(bdev) != bdev); part_count = cas_blk_get_part_count(bdev); + + if (reattach) { + ENV_BUG_ON(!cache); + + cache_priv = ocf_cache_get_priv(cache); + tmp_limits = cache_priv->device_properties.queue_limits; + + if (blk_stack_limits(&tmp_limits, &bdev->bd_disk->queue->limits, 0)) { + reattach_properties_diff = true; + printk(KERN_WARNING "New cache device block properties " + "differ from the previous one.\n"); + } + if (tmp_limits.misaligned) { + reattach_properties_diff = true; + printk(KERN_WARNING "New cache device block interval " + "doesn't line up with the previous one.\n"); + } + if (CAS_CHECK_QUEUE_FLUSH(bdev->bd_disk->queue) != + cache_priv->device_properties.flush) { + reattach_properties_diff = true; + printk(KERN_WARNING "New cache device %s support flush " + "in contrary to the previous cache device.\n", + cache_priv->device_properties.flush ? "doesn't" : "does"); + } + if (CAS_CHECK_QUEUE_FUA(bdev->bd_disk->queue) != + cache_priv->device_properties.fua) { + reattach_properties_diff = true; + printk(KERN_WARNING "New cache device %s support fua " + "in contrary to the previous cache device.\n", + cache_priv->device_properties.fua ? 
"doesn't" : "does"); + } + } + cas_bdev_release(bdev_handle, (CAS_BLK_MODE_EXCL | CAS_BLK_MODE_READ), holder); if (!is_part && part_count > 1 && !force) return -KCAS_ERR_CONTAINS_PART; + if (reattach_properties_diff) + return -KCAS_ERR_DEVICE_PROPERTIES_MISMATCH; + return 0; } @@ -2362,6 +2509,72 @@ int cache_mngt_create_cache_standby_activate_cfg( return 0; } +static void _cache_mngt_detach_cache_complete(ocf_cache_t cache, void *priv, + int error) +{ + struct _cache_mngt_async_context *context = priv; + int result; + + result = _cache_mngt_async_callee_set_result(context, error); + + if (result != -KCAS_ERR_WAITING_INTERRUPTED) + return; + + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + kfree(context); +} + +int cache_mngt_attach_device(const char *cache_name, size_t name_len, + const char *device, struct ocf_mngt_cache_attach_config *attach_cfg) +{ + struct _cache_mngt_attach_context *context; + ocf_cache_t cache; + int result = 0; + + result = ocf_mngt_cache_get_by_name(cas_ctx, cache_name, + OCF_CACHE_NAME_SIZE, &cache); + if (result) + goto err_get; + + result = _cache_mngt_lock_sync(cache); + if (result) + goto err_lock; + + result = cache_mngt_check_bdev(&attach_cfg->device, + attach_cfg->force, true, cache); + if (result) + goto err_ctx; + + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) { + result = -ENOMEM; + goto err_ctx; + } + + context->device_cfg = attach_cfg->device; + + _cache_mngt_async_context_init(&context->async); + + ocf_mngt_cache_attach(cache, attach_cfg, _cache_mngt_attach_complete, + context); + result = wait_for_completion_interruptible(&context->async.cmpl); + + result = _cache_mngt_async_caller_set_result(&context->async, result); + if (result == -KCAS_ERR_WAITING_INTERRUPTED) + goto err_get; + + volume_set_no_merges_flag_helper(cache); + + kfree(context); +err_ctx: + ocf_mngt_cache_unlock(cache); +err_lock: + ocf_mngt_cache_put(cache); +err_get: + return result; +} + int cache_mngt_activate(struct 
ocf_mngt_cache_standby_activate_config *cfg, struct kcas_standby_activate *cmd) { @@ -2396,7 +2609,7 @@ int cache_mngt_activate(struct ocf_mngt_cache_standby_activate_config *cfg, * to compare data on drive and in DRAM to provide more specific * error code. */ - result = cache_mngt_check_bdev(&cfg->device, true); + result = cache_mngt_check_bdev(&cfg->device, true, false, NULL); if (result) goto out_cache_unlock; @@ -2493,7 +2706,7 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg, if (!try_module_get(THIS_MODULE)) return -KCAS_ERR_SYSTEM; - result = cache_mngt_check_bdev(&attach_cfg->device, attach_cfg->force); + result = cache_mngt_check_bdev(&attach_cfg->device, attach_cfg->force, false, NULL); if (result) { module_put(THIS_MODULE); return result; @@ -2864,6 +3077,11 @@ int cache_mngt_set_cache_mode(const char *cache_name, size_t name_len, goto put; } + if (!ocf_cache_is_device_attached(cache)) { + result = -OCF_ERR_CACHE_DETACHED; + goto put; + } + old_mode = ocf_cache_get_mode(cache); if (old_mode == mode) { printk(KERN_INFO "%s is in requested cache mode already\n", cache_name); @@ -2911,6 +3129,53 @@ int cache_mngt_set_cache_mode(const char *cache_name, size_t name_len, return result; } +int cache_mngt_detach_cache(const char *cache_name, size_t name_len) +{ + ocf_cache_t cache; + int status = 0; + struct _cache_mngt_async_context *context; + + context = kmalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return -ENOMEM; + + _cache_mngt_async_context_init(context); + + status = ocf_mngt_cache_get_by_name(cas_ctx, cache_name, + name_len, &cache); + if (status) + goto err_get_cache; + + if (ocf_cache_is_running(cache)) + status = _cache_flush_with_lock(cache); + if (status) + goto err_flush; + + status = _cache_mngt_lock_sync(cache); + if (status) + goto err_lock; + + ocf_mngt_cache_detach(cache, _cache_mngt_detach_cache_complete, context); + + status = wait_for_completion_interruptible(&context->cmpl); + status = 
_cache_mngt_async_caller_set_result(context, status); + + if (status == -KCAS_ERR_WAITING_INTERRUPTED) { + printk(KERN_WARNING "Waiting for cache detach interrupted. " + "The operation will finish asynchronously.\n"); + goto err_int; + } + + ocf_mngt_cache_unlock(cache); +err_lock: +err_flush: + ocf_mngt_cache_put(cache); +err_get_cache: + kfree(context); +err_int: + return status; +} + /** * @brief routine implements --stop-cache command. * @param[in] cache_name caching device name to be removed diff --git a/modules/cas_cache/layer_cache_management.h b/modules/cas_cache/layer_cache_management.h index 24f6c66cf..4e36d2c4d 100644 --- a/modules/cas_cache/layer_cache_management.h +++ b/modules/cas_cache/layer_cache_management.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation +* Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ #ifndef __LAYER_CACHE_MANAGEMENT_H__ @@ -42,6 +43,11 @@ int cache_mngt_reset_stats(const char *cache_name, size_t cache_name_len, int cache_mngt_set_partitions(const char *cache_name, size_t name_len, struct kcas_io_classes *cfg); +int cache_mngt_detach_cache(const char *cache_name, size_t name_len); + +int cache_mngt_attach_device(const char *cache_name, size_t name_len, + const char *device, struct ocf_mngt_cache_attach_config *attach_cfg); + int cache_mngt_exit_instance(const char *cache_name, size_t name_len, int flush); @@ -49,6 +55,11 @@ int cache_mngt_create_cache_cfg(struct ocf_mngt_cache_config *cfg, struct ocf_mngt_cache_attach_config *attach_cfg, struct kcas_start_cache *cmd); +int cache_mngt_attach_cache_cfg(char *cache_name, size_t name_len, + struct ocf_mngt_cache_config *cfg, + struct ocf_mngt_cache_attach_config *attach_cfg, + struct kcas_start_cache *cmd); + int cache_mngt_core_pool_get_paths(struct kcas_core_pool_path *cmd_info); int cache_mngt_core_pool_remove(struct kcas_core_pool_remove *cmd_info); diff --git a/modules/cas_cache/service_ui_ioctl.c 
b/modules/cas_cache/service_ui_ioctl.c index ae8fc7bd8..0bd8e19ca 100644 --- a/modules/cas_cache/service_ui_ioctl.c +++ b/modules/cas_cache/service_ui_ioctl.c @@ -78,6 +78,41 @@ long cas_service_ioctl_ctrl(struct file *filp, unsigned int cmd, RETURN_CMD_RESULT(cmd_info, arg, retval); } + case KCAS_IOCTL_ATTACH_CACHE: { + struct kcas_start_cache *cmd_info; + struct ocf_mngt_cache_config cfg; + struct ocf_mngt_cache_attach_config attach_cfg; + char cache_name[OCF_CACHE_NAME_SIZE]; + + GET_CMD_INFO(cmd_info, arg); + + cache_name_from_id(cache_name, cmd_info->cache_id); + + retval = cache_mngt_attach_cache_cfg(cache_name, OCF_CACHE_NAME_SIZE, + &cfg, &attach_cfg, cmd_info); + if (retval) + RETURN_CMD_RESULT(cmd_info, arg, retval); + + retval = cache_mngt_attach_device(cache_name, OCF_CACHE_NAME_SIZE, + cmd_info->cache_path_name, &attach_cfg); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + + case KCAS_IOCTL_DETACH_CACHE: { + struct kcas_stop_cache *cmd_info; + char cache_name[OCF_CACHE_NAME_SIZE]; + + GET_CMD_INFO(cmd_info, arg); + + cache_name_from_id(cache_name, cmd_info->cache_id); + + retval = cache_mngt_detach_cache(cache_name, + OCF_CACHE_NAME_SIZE); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + case KCAS_IOCTL_SET_CACHE_STATE: { struct kcas_set_cache_state *cmd_info; char cache_name[OCF_CACHE_NAME_SIZE]; diff --git a/modules/cas_cache/volume/vol_block_dev_top.c b/modules/cas_cache/volume/vol_block_dev_top.c index 397dc2624..7bb1b176c 100644 --- a/modules/cas_cache/volume/vol_block_dev_top.c +++ b/modules/cas_cache/volume/vol_block_dev_top.c @@ -35,35 +35,29 @@ static void blkdev_set_exported_object_flush_fua(ocf_core_t core) { ocf_cache_t cache = ocf_core_get_cache(core); ocf_volume_t core_vol = ocf_core_get_volume(core); - ocf_volume_t cache_vol = ocf_cache_get_volume(cache); - struct bd_object *bd_core_vol, *bd_cache_vol; - struct request_queue *core_q, *exp_q, *cache_q; + struct bd_object *bd_core_vol; + struct request_queue *core_q, *exp_q; bool 
flush, fua; - - BUG_ON(!cache_vol); - + struct cache_priv *cache_priv = ocf_cache_get_priv(cache); bd_core_vol = bd_object(core_vol); - bd_cache_vol = bd_object(cache_vol); core_q = cas_disk_get_queue(bd_core_vol->dsk); exp_q = cas_exp_obj_get_queue(bd_core_vol->dsk); - cache_q = cas_disk_get_queue(bd_cache_vol->dsk); - flush = (CAS_CHECK_QUEUE_FLUSH(core_q) || CAS_CHECK_QUEUE_FLUSH(cache_q)); - fua = (CAS_CHECK_QUEUE_FUA(core_q) || CAS_CHECK_QUEUE_FUA(cache_q)); + flush = (CAS_CHECK_QUEUE_FLUSH(core_q) || + cache_priv->device_properties.flush); + fua = (CAS_CHECK_QUEUE_FUA(core_q) || cache_priv->device_properties.fua); cas_set_queue_flush_fua(exp_q, flush, fua); } static void blkdev_set_discard_properties(ocf_cache_t cache, - struct request_queue *exp_q, struct block_device *cache_bd, - struct block_device *core_bd, sector_t core_sectors) + struct request_queue *exp_q, struct block_device *core_bd, + sector_t core_sectors) { struct request_queue *core_q; - struct request_queue *cache_q; core_q = bdev_get_queue(core_bd); - cache_q = bdev_get_queue(cache_bd); cas_set_discard_flag(exp_q); @@ -91,38 +85,32 @@ static int blkdev_core_set_geometry(struct cas_disk *dsk, void *private) ocf_core_t core; ocf_cache_t cache; ocf_volume_t core_vol; - ocf_volume_t cache_vol; - struct bd_object *bd_cache_vol; - struct request_queue *core_q, *cache_q, *exp_q; - struct block_device *core_bd, *cache_bd; + struct request_queue *core_q, *exp_q; + struct block_device *core_bd; sector_t sectors; const char *path; + struct cache_priv *cache_priv; BUG_ON(!private); core = private; cache = ocf_core_get_cache(core); core_vol = ocf_core_get_volume(core); - cache_vol = ocf_cache_get_volume(cache); - BUG_ON(!cache_vol); + cache_priv = ocf_cache_get_priv(cache); - bd_cache_vol = bd_object(cache_vol); path = ocf_volume_get_uuid(core_vol)->data; core_bd = cas_disk_get_blkdev(dsk); BUG_ON(!core_bd); - cache_bd = cas_disk_get_blkdev(bd_cache_vol->dsk); - BUG_ON(!cache_bd); - core_q = 
cas_bdev_whole(core_bd)->bd_disk->queue; - cache_q = cache_bd->bd_disk->queue; exp_q = cas_exp_obj_get_queue(dsk); sectors = ocf_volume_get_length(core_vol) >> SECTOR_SHIFT; set_capacity(cas_exp_obj_get_gendisk(dsk), sectors); - cas_copy_queue_limits(exp_q, cache_q, core_q); + cas_copy_queue_limits(exp_q, &cache_priv->device_properties.queue_limits, + core_q); if (exp_q->limits.logical_block_size > core_q->limits.logical_block_size) { @@ -139,8 +127,7 @@ static int blkdev_core_set_geometry(struct cas_disk *dsk, void *private) blkdev_set_exported_object_flush_fua(core); - blkdev_set_discard_properties(cache, exp_q, cache_bd, core_bd, - sectors); + blkdev_set_discard_properties(cache, exp_q, core_bd, sectors); exp_q->queue_flags |= (1 << QUEUE_FLAG_NONROT); @@ -469,7 +456,8 @@ static int blkdev_cache_set_geometry(struct cas_disk *dsk, void *private) set_capacity(cas_exp_obj_get_gendisk(dsk), sectors); - cas_copy_queue_limits(exp_q, cache_q, cache_q); + cas_copy_queue_limits(exp_q, &cache_q->limits, cache_q); + cas_cache_set_no_merges_flag(cache_q); blk_stack_limits(&exp_q->limits, &cache_q->limits, 0); diff --git a/modules/include/cas_ioctl_codes.h b/modules/include/cas_ioctl_codes.h index 78912aa87..bdfad29e0 100644 --- a/modules/include/cas_ioctl_codes.h +++ b/modules/include/cas_ioctl_codes.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation +* Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ @@ -405,6 +406,8 @@ struct kcas_standby_activate * 39 * KCAS_IOCTL_STANDBY_ACTIVATE * OK * * 40 * KCAS_IOCTL_CORE_INFO * OK * * 41 * KCAS_IOCTL_START_CACHE * OK * + * 42 * KCAS_IOCTL_DETACH_CACHE * OK * + * 43 * KCAS_IOCTL_ATTACH_CACHE * OK * ******************************************************************************* */ @@ -503,6 +506,12 @@ struct kcas_standby_activate /** Start new cache instance, load cache or recover cache */ #define KCAS_IOCTL_START_CACHE _IOWR(KCAS_IOCTL_MAGIC, 41, struct kcas_start_cache) +/** Detach cache 
device */ +#define KCAS_IOCTL_DETACH_CACHE _IOWR(KCAS_IOCTL_MAGIC, 42, struct kcas_stop_cache) + +/** Attach cache device */ +#define KCAS_IOCTL_ATTACH_CACHE _IOWR(KCAS_IOCTL_MAGIC, 43, struct kcas_start_cache) + /** * Extended kernel CAS error codes */ @@ -545,6 +554,11 @@ enum kcas_error { /** Device contains partitions */ KCAS_ERR_CONTAINS_PART, + /** The new device's properties do not match the original cache's + * properties + */ + KCAS_ERR_DEVICE_PROPERTIES_MISMATCH, + /** Given device is a partition */ KCAS_ERR_A_PART,