diff --git a/doc/services/storage/index.rst b/doc/services/storage/index.rst
index 8d3dd3da50c2be..f6e069ecf2ab16 100644
--- a/doc/services/storage/index.rst
+++ b/doc/services/storage/index.rst
@@ -7,6 +7,7 @@ Storage
    :maxdepth: 1

    nvs/nvs.rst
+   zms/zms.rst
    disk/access.rst
    flash_map/flash_map.rst
    fcb/fcb.rst
diff --git a/doc/services/storage/zms/zms.rst b/doc/services/storage/zms/zms.rst
new file mode 100644
index 00000000000000..d1125e0505d24a
--- /dev/null
+++ b/doc/services/storage/zms/zms.rst
@@ -0,0 +1,432 @@
+.. _zms_api:
+
+Zephyr Memory Storage (ZMS)
+###########################
+Zephyr Memory Storage is a new key-value storage system designed to work with all types of
+non-volatile storage technologies. It supports classical on-chip NOR flash as well as new
+technologies like RRAM and MRAM that do not require a separate erase operation at all; data on
+these types of devices can be overwritten directly at any time.
+
+General behavior
+****************
+ZMS divides the memory space into sectors (minimum 2), and each sector is filled with key-value
+pairs until it is full.
+
+The key-value pair is divided into two parts:
+
+- The key part is written in an ATE (Allocation Table Entry) called "ID-ATE", stored starting
+  from the bottom of the sector
+- The value part is defined as "DATA" and is stored raw, starting from the top of the sector
+
+Additionally, the last positions of each sector hold header ATEs, which describe the sector
+status (closed, open) and the current version of ZMS.
+
+When the current sector is full, ZMS first verifies that the following sector is empty, garbage
+collects sector N+2 (where N is the current sector number) by moving its valid ATEs to the empty
+sector N+1, erases the garbage-collected sector, and then closes the current sector by writing a
+garbage_collect_done ATE and the close ATE (one of the header entries).
+Afterwards it moves forward to the next sector and starts writing entries again.
+
+This behavior is repeated until the end of the partition is reached. Then ZMS starts again from
+the first sector after garbage collecting it and erasing its content.
+
+Composition of a sector
+=======================
+A sector is organized in this form (example with 3 sectors):
+
+.. list-table::
+   :widths: 25 25 25
+   :header-rows: 1
+
+   * - Sector 0 (closed)
+     - Sector 1 (open)
+     - Sector 2 (empty)
+   * - Data_a0
+     - Data_b0
+     - Data_c0
+   * - Data_a1
+     - Data_b1
+     - Data_c1
+   * - Data_a2
+     - Data_b2
+     - Data_c2
+   * - GC_done
+     - .
+     - .
+   * - .
+     - .
+     - .
+   * - .
+     - .
+     - .
+   * - .
+     - ATE_b2
+     - ATE_c2
+   * - ATE_a2
+     - ATE_b1
+     - ATE_c1
+   * - ATE_a1
+     - ATE_b0
+     - ATE_c0
+   * - ATE_a0
+     - GC_done
+     - GC_done
+   * - Close (cyc=1)
+     - Close (cyc=1)
+     - Close (cyc=1)
+   * - Empty (cyc=1)
+     - Empty (cyc=2)
+     - Empty (cyc=2)
+
+Definition of each element in the sector
+========================================
+
+``Empty ATE:`` is written when erasing a sector (last position of the sector).
+
+``Close ATE:`` is written when closing a sector (second to last position of the sector).
+
+``GC_done ATE:`` is written to indicate that the next sector has already been garbage
+collected. This ATE can be at any position of the sector.
+
+``ID-ATE:`` is an entry that contains a 32-bit key and describes where the data is stored, its
+size and its CRC32.
+
+``Data:`` is the actual value associated with the ID-ATE.
+
+How does ZMS work?
+******************
+
+Mounting the Storage system
+===========================
+
+Mounting the storage starts by getting the flash parameters, checking that the file system
+properties are correct (sector_size, sector_count ...), and then calling the zms_init function
+to make the storage ready.
+
+To mount the file system, some elements of the zms_fs structure must be initialized.
+
+.. code-block:: c
+
+   struct zms_fs {
+           /** File system offset in flash **/
+           off_t offset;
+
+           /** Storage system is split into sectors, each sector size must be multiple of
+            * erase-blocks if the device has erase capabilities
+            */
+           uint32_t sector_size;
+           /** Number of sectors in the file system */
+           uint32_t sector_count;
+
+           /** Flash device runtime structure */
+           const struct device *flash_device;
+   };
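+
+For illustration, here is a minimal sketch of how an application could fill in these fields and
+mount the storage. It assumes a fixed partition named ``storage_partition`` and uses one flash
+page per sector, as done in the ZMS sample; ``storage_setup()`` is just an illustrative helper
+name:
+
+.. code-block:: c
+
+   #include <errno.h>
+   #include <zephyr/device.h>
+   #include <zephyr/drivers/flash.h>
+   #include <zephyr/fs/zms.h>
+   #include <zephyr/storage/flash_map.h>
+
+   static struct zms_fs fs;
+
+   int storage_setup(void)
+   {
+           struct flash_pages_info info;
+           int rc;
+
+           /* Resolve the flash device and offset of the fixed partition */
+           fs.flash_device = FIXED_PARTITION_DEVICE(storage_partition);
+           if (!device_is_ready(fs.flash_device)) {
+                   return -ENODEV;
+           }
+           fs.offset = FIXED_PARTITION_OFFSET(storage_partition);
+
+           /* Use one flash page per sector and three sectors in total */
+           rc = flash_get_page_info_by_offs(fs.flash_device, fs.offset, &info);
+           if (rc) {
+                   return rc;
+           }
+           fs.sector_size = info.size;
+           fs.sector_count = 3;
+
+           return zms_mount(&fs);
+   }
+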
+Initialization
+==============
+
+As ZMS has a fast-forward write mechanism, it must find the last sector and the address of the
+entry where the last write stopped.
+To do so, it looks for a closed sector followed by an open one, then recovers the last written
+ATE (Allocation Table Entry) within the open sector.
+After that, it checks that the sector following this one is empty, and erases it if it is not.
+
+ZMS ID-Data write
+=================
+
+To avoid rewriting the same data with the same ID, ZMS first checks all sectors for an existing
+ATE with the same ID and compares its data; if the data is identical, no write is performed.
+If a write must be performed, an ATE and the data (unless it is a delete) are written in the
+sector.
+If the sector is full (cannot hold the current data + ATE), ZMS moves to the next sector, garbage
+collects the sector after the newly opened one, and then erases it.
+Data whose size is smaller than or equal to 8 bytes is written within the ATE itself.
+
+ZMS ID/data read (with history)
+===============================
+
+By default ZMS looks for the last data with the same ID by browsing through all stored ATEs, from
+the most recent to the oldest. If it finds a valid ATE with a matching ID, it retrieves its data
+and returns the number of bytes that were read.
+If a history count different from 0 is provided, older data with the same ID is retrieved.
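+
+As a short usage illustration of the write and read calls described above, the sketch below
+stores a value, reads back the latest copy, and then reads the previous copy of the same ID with
+the history API. The ID value and the helper name are arbitrary examples:
+
+.. code-block:: c
+
+   #include <string.h>
+   #include <zephyr/fs/zms.h>
+
+   #define IP_ADDRESS_ID 1
+
+   int store_and_reload(struct zms_fs *fs)
+   {
+           char ip[16] = "192.168.1.1";
+           char readback[16];
+           ssize_t rc;
+
+           /* Write the value; 0 is returned if identical data is already stored */
+           rc = zms_write(fs, IP_ADDRESS_ID, ip, strlen(ip) + 1);
+           if (rc < 0) {
+                   return (int)rc;
+           }
+
+           /* Read back the most recent value stored for this ID */
+           rc = zms_read(fs, IP_ADDRESS_ID, readback, sizeof(readback));
+           if (rc < 0) {
+                   return (int)rc;
+           }
+
+           /* Read the value written just before the latest one, if any */
+           rc = zms_read_hist(fs, IP_ADDRESS_ID, readback, sizeof(readback), 1);
+
+           return (rc < 0) ? (int)rc : 0;
+   }
+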
+
+ZMS free space calculation
+==========================
+
+ZMS can also return the free space remaining in the partition.
+However, this operation is very time consuming: it needs to browse all valid ATEs in all sectors
+of the partition and, for each valid ATE, check whether an older one exists.
+Applications should not use this function often, as it could slow down the calling thread.
+
+The cycle counter
+=================
+
+Each sector has a lead cycle counter, a uint8_t value that is used to validate all the other
+ATEs.
+The lead cycle counter is stored in the empty ATE.
+To become valid, an ATE must have the same cycle counter as the one stored in the empty ATE.
+Each time an ATE is moved from one sector to another, it takes the cycle counter of the
+destination sector.
+To erase a sector, the cycle counter of the empty ATE is incremented and a single write of the
+empty ATE is done.
+All the ATEs in that sector then become invalid.
+
+Closing sectors
+===============
+
+To close a sector, a close ATE is added at the end of the sector; it must have the same cycle
+counter as the empty ATE.
+When closing a sector, all the remaining unused space is filled with garbage data to avoid
+having old ATEs with a valid cycle counter.
+
+Triggering Garbage collection
+=============================
+
+Some applications need storage writes to have a maximum defined latency.
+When calling a ZMS write, the current sector could be almost full, in which case the GC must be
+triggered to switch to the next sector.
+This operation is time consuming and could cause some applications to miss their real-time
+constraints.
+ZMS therefore provides an API that lets the application query the remaining free space in the
+current sector.
+The application can then decide, when needed, to switch to the next sector if the current one is
+almost full, which triggers the garbage collection on the next sector.
+This guarantees that the next write will not trigger the garbage collection.
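+
+The sketch below shows one way an application could use ``zms_sector_max_data_size()`` together
+with ``zms_sector_use_next()`` (both declared in ``zms.h``) to keep write latency bounded; the
+margin check is an illustrative assumption, not a rule imposed by ZMS:
+
+.. code-block:: c
+
+   #include <zephyr/fs/zms.h>
+
+   /* Write a record, but switch to the next sector beforehand if the record
+    * (data + one 16-byte ATE) would no longer fit in the current one, so that
+    * the write itself never has to wait for garbage collection.
+    */
+   int write_with_bounded_latency(struct zms_fs *fs, uint32_t id, const void *data, size_t len)
+   {
+           const size_t ate_size = 16;
+           ssize_t rc;
+
+           if (zms_sector_max_data_size(fs) < (len + ate_size)) {
+                   int err = zms_sector_use_next(fs);
+
+                   if (err) {
+                           return err;
+                   }
+           }
+
+           rc = zms_write(fs, id, data, len);
+
+           return (rc < 0) ? (int)rc : 0;
+   }
+
+Note that forcing sector changes causes extra sector rotations and therefore extra wear, so this
+pattern should only be used when the latency guarantee is really needed.
+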
+
+ATE (Allocation Table Entry) structure
+======================================
+
+An entry is 16 bytes, divided between these fields:
+
+.. code-block:: c
+
+   struct zms_ate {
+           uint8_t crc8;      /* crc8 check of the entry */
+           uint8_t cycle_cnt; /* cycle counter for non erasable devices */
+           uint32_t id;       /* data id */
+           uint16_t len;      /* data len within sector */
+           union {
+                   uint8_t data[8]; /* used to store small size data */
+                   struct {
+                           uint32_t offset; /* data offset within sector */
+                           union {
+                                   uint32_t data_crc; /* crc for data */
+                                   uint32_t metadata; /* Used to store metadata information
+                                                       * such as storage version.
+                                                       */
+                           };
+                   };
+           };
+   } __packed;
+
+.. note:: The data CRC is checked only when the whole data of the element is read.
+   The data CRC is not checked for a partial read, as it is computed for the complete set of
+   data.
+
+.. note:: Enabling the data CRC feature on previously existing ZMS content that was written
+   without data CRC will make all existing data invalid.
+
+.. _free-space:
+
+Available space for user data (key-value pairs)
+***********************************************
+
+In both of the scenarios described below, ZMS always needs one empty sector to be able to
+perform the garbage collection.
+So if a partition contains 4 sectors, ZMS will only use 3 of them to store key-value pairs and
+will always keep one (rotating) sector empty to be able to launch the GC.
+
+.. note:: The maximum single data length that can be written at once in a sector is 64 KB
+   (this could change in future versions of ZMS).
+
+Small data values
+=================
+
+For small data values (<= 8 bytes), the data is stored within the entry (ATE) itself and no data
+is written at the top of the sector.
+ZMS has an entry size of 16 bytes, which means that the maximum available space in a partition
+to store data is computed in this scenario as:
+
+.. math::
+
+   \small\frac{(NUM\_SECTORS - 1) \times (SECTOR\_SIZE - (5 \times ATE\_SIZE))}{2}
+
+Where:
+
+``NUM_SECTORS:`` Total number of sectors
+
+``SECTOR_SIZE:`` Size of a sector
+
+``ATE_SIZE:`` 16 bytes
+
+``(5 * ATE_SIZE):`` Reserved ATEs for the header and delete items
+
+For example, with 4 sectors of 1024 bytes, the free space for data is
+:math:`\frac{3 \times 944}{2} = 1416 \, \text{ bytes}`.
+
+Large data values
+=================
+
+Large data values (> 8 bytes) are stored separately at the top of the sector.
+In this case it is hard to estimate the available free space, as it depends on the size of the
+data. However, for N bytes of data (N > 8 bytes), an additional 16-byte ATE must be added at the
+bottom of the sector.
+
+Let's take an example: a partition with 4 sectors of 1024 bytes and a data size of 64 bytes.
+Only 3 sectors are available for writes, with a capacity of 944 bytes each.
+Each key-value pair needs an extra 16 bytes for the ATE, which makes it possible to store 11
+pairs in each sector (:math:`\frac{944}{80}`).
+The total data that can be stored in this partition in this case is
+:math:`11 \times 3 \times 64 = 2112 \text{ bytes}`.
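+
+Generalizing this example (an approximation that ignores any write-block-size alignment of the
+data and assumes a single fixed data size of :math:`N` bytes, :math:`N > 8`), the storable
+amount can be estimated as:
+
+.. math::
+
+   \small (NUM\_SECTORS - 1) \times \left\lfloor\frac{SECTOR\_SIZE - (5 \times ATE\_SIZE)}{ATE\_SIZE + N}\right\rfloor \times N
+
+With the values above this gives :math:`3 \times \lfloor 944 / 80 \rfloor \times 64 = 2112
+\text{ bytes}`, matching the result of the example.
+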
+
+.. _wear-leveling:
+
+Wear leveling
+*************
+
+This storage system is optimized for devices that do not require an erase operation.
+Storage systems that rely on an erase value (NVS, for example) need to emulate the erase with
+write operations on such devices. This significantly decreases the life expectancy of these
+devices and adds delays to write operations and to initialization.
+ZMS introduces a cycle-count mechanism that avoids emulating the erase operation for these
+devices.
+It also guarantees that every memory location is written only once per sector write cycle.
+
+As an example, to erase a 4096-byte sector on a non-erasable device using NVS, 256 flash writes
+must be performed (assuming a write-block-size of 16 bytes), while with ZMS only one write of
+16 bytes is needed. This operation is 256 times faster in this case.
+
+The garbage collection operation also adds writes that count against the memory cells' life
+expectancy, as it moves blocks from one sector to another.
+To prevent the garbage collector from affecting the life expectancy of the device, it is
+recommended to dimension the partition correctly: its size should be double the maximum size of
+the data (including extra headers) that could be written in the storage.
+
+See :ref:`free-space`.
+
+Device lifetime calculation
+===========================
+
+Storage devices, whether classical flash or new technologies like RRAM/MRAM, have a limited
+life expectancy determined by the number of times memory cells can be erased/written.
+Flash devices are erased one page at a time as part of their functional behavior (otherwise
+memory cells cannot be overwritten), while on non-erasable storage devices memory cells can be
+overwritten directly.
+
+A typical scenario is shown here to calculate the life expectancy of a device.
+Let's suppose that we store an 8-byte variable using the same ID and that its content changes
+every minute. The partition has 4 sectors of 1024 bytes each.
+Each write of the variable requires 16 bytes of storage.
+As 944 bytes are available for ATEs in each sector, and because ZMS is a fast-forward storage
+system, the first location of the first sector will be rewritten after
+:math:`\frac{(944 \times 4)}{16} = 236 \text{ minutes}`.
+
+In addition to the normal writes, the garbage collector will move the still-valid data from old
+sectors to new ones.
+As we are using the same ID and a large partition size, no data will be moved by the garbage
+collector in this case.
+For storage devices that can be written 20,000 times, the storage will last about
+4,720,000 minutes (~9 years).
+
+To derive a more general formula we must first compute the effective size used in ZMS by our
+typical set of data:
+
+- For an ID/data pair with data <= 8 bytes, the effective size is 16 bytes
+- For an ID/data pair with data > 8 bytes, the effective size is 16 bytes + sizeof(data)
+
+Let's suppose that total_effective_size is the total size of the set of data written in the
+storage and that the partition is well dimensioned (double the effective size) to avoid having
+the garbage collector move blocks all the time.
+
+The expected life of the device in minutes is computed as:
+
+.. math::
+
+   \small\frac{(SECTOR\_EFFECTIVE\_SIZE \times SECTOR\_NUMBER \times MAX\_NUM\_WRITES)}{(TOTAL\_EFFECTIVE\_SIZE \times WR\_MIN)}
+
+Where:
+
+``SECTOR_EFFECTIVE_SIZE``: the sector size minus the header size (80 bytes)
+
+``SECTOR_NUMBER``: the number of sectors
+
+``MAX_NUM_WRITES``: the life expectancy of the storage device in number of writes
+
+``TOTAL_EFFECTIVE_SIZE``: the total effective size of the set of written data
+
+``WR_MIN``: the number of writes of the set of data per minute
+
+Features
+********
+ZMS introduces many features compared to existing storage systems like NVS and will evolve from
+its initial version to include more features that satisfy the requirements of new technologies,
+such as low latency and bigger storage space.
+
+Existing features
+=================
+Version1
+--------
+- Supports non-erasable devices (only one write operation to erase a sector)
+- Supports large partition sizes and sector sizes (64-bit address space)
+- Supports a large ID width (32 bits) to store ID/value pairs
+- Small-sized data (<= 8 bytes) is stored in the ATE itself
+- Built-in data CRC32 (included in the ATE)
+- Versioning of ZMS (to handle future evolution)
+- Supports large write-block-size (only for platforms that need it)
+
+Future features
+===============
+
+- Add multiple-format ATE support to be able to use ZMS with different ATE formats that satisfy
+  application requirements
+- Add the possibility to skip the garbage collector for application usages where ID/value pairs
+  are written periodically and do not exceed half of the partition size (there is always an old
+  entry with the same ID).
+- Divide IDs into namespaces and allocate IDs on demand from the application to handle
+  collisions between IDs used by different subsystems or samples.
+- Add the possibility to retrieve the wear-out value of the device based on the cycle count
+  value
+- Add a recovery function that can recover a storage partition if something went wrong
+- Add a library/application to allow migration from NVS entries to ZMS entries
+- Add the possibility to force formatting the storage partition to the ZMS format if something
+  went wrong when mounting the storage.
+
+ZMS and other storage systems in Zephyr
+=======================================
+This section describes ZMS in the wider context of storage systems in Zephyr (not full file
+systems, but simpler, non-hierarchical ones).
+Today Zephyr includes at least two other systems that are somewhat comparable in scope and
+functionality: :ref:`NVS ` and :ref:`FCB `.
+Which one to use in your application will depend on your needs and the hardware you are using,
+and this section provides information to help make a choice.
+
+- If you are using a non-erasable technology device like RRAM or MRAM, :ref:`ZMS ` is definitely
+  the best fit for your storage subsystem, as it is designed to avoid emulating erase for these
+  devices and replaces it with a single write call.
+- For devices with a large write_block_size and/or that need a sector size different from the
+  classical flash page size (equal to erase_block_size), :ref:`ZMS ` is also the best fit, as
+  these parameters can be customized to add support for such devices.
+- For classical flash technology devices, :ref:`NVS ` is recommended as it has a lower
+  footprint (smaller ATEs and smaller header ATEs).
+  Erasing flash in NVS is also very fast and does not require an additional write operation
+  compared to ZMS.
+  For these devices, NVS reads/writes will also be faster than ZMS, as NVS has a smaller ATE
+  size.
+- If your application needs more than 64K IDs for storage, :ref:`ZMS ` is recommended, as it
+  has a 32-bit ID field.
+- If your application works in a FIFO mode (First-In First-Out), then :ref:`FCB ` is the best
+  storage solution for this use case.
+
+More generally, to make the right choice between NVS and ZMS, first verify all the blockers to
+make sure that the application can work with one subsystem or the other. If both solutions
+could be implemented, base the choice on the device life expectancy calculations described in
+:ref:`wear-leveling`.
+
+Sample
+******
+
+A sample of how ZMS can be used is supplied in :zephyr:code-sample:`zms`.
+
+API Reference
+*************
+
+The ZMS subsystem APIs are provided by ``zms.h``:
+
+.. doxygengroup:: zms_data_structures
+
+.. doxygengroup:: zms_high_level_api
+
+.. comment
+   not documenting .. doxygengroup:: zms
diff --git a/include/zephyr/fs/zms.h b/include/zephyr/fs/zms.h
new file mode 100644
index 00000000000000..1155319d7924bb
--- /dev/null
+++ b/include/zephyr/fs/zms.h
@@ -0,0 +1,215 @@
+/* ZMS: Zephyr Memory Storage
+ *
+ * Copyright (c) 2024 BayLibre SAS
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+#ifndef ZEPHYR_INCLUDE_FS_ZMS_H_
+#define ZEPHYR_INCLUDE_FS_ZMS_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @brief Zephyr Memory Storage (ZMS)
+ * @defgroup zms Zephyr Memory Storage (ZMS)
+ * @ingroup file_system_storage
+ * @{
+ * @}
+ */
+
+/**
+ * @brief Zephyr Memory Storage Data Structures
+ * @defgroup zms_data_structures Zephyr Memory Storage Data Structures
+ * @ingroup zms
+ * @{
+ */
+
+/**
+ * @brief Zephyr Memory Storage File system structure
+ */
+struct zms_fs {
+	/** File system offset in flash **/
+	off_t offset;
+	/** Allocation table entry write address.
+	 * Addresses are stored as uint64_t:
+	 * - high 4 bytes correspond to the sector
+	 * - low 4 bytes are the offset in the sector
+	 */
+	uint64_t ate_wra;
+	/** Data write address */
+	uint64_t data_wra;
+	/** Storage system is split into sectors, each sector size must be multiple of erase-blocks
+	 * if the device has erase capabilities
+	 */
+	uint32_t sector_size;
+	/** Number of sectors in the file system */
+	uint32_t sector_count;
+	/** Current cycle counter of the active sector (pointed by ate_wra) */
+	uint8_t sector_cycle;
+	/** Flag indicating if the file system is initialized */
+	bool ready;
+	/** Mutex */
+	struct k_mutex zms_lock;
+	/** Flash device runtime structure */
+	const struct device *flash_device;
+	/** Flash memory parameters structure */
+	const struct flash_parameters *flash_parameters;
+	/** Size of an Allocation Table Entry */
+	size_t ate_size;
+#if CONFIG_ZMS_LOOKUP_CACHE
+	/** Lookup table used to cache ATE address of a written ID */
+	uint64_t lookup_cache[CONFIG_ZMS_LOOKUP_CACHE_SIZE];
+#endif
+};
+
+/**
+ * @}
+ */
+
+/**
+ * @brief Zephyr Memory Storage APIs
+ * @defgroup zms_high_level_api Zephyr Memory Storage APIs
+ * @ingroup zms
+ * @{
+ */
+
+/**
+ * @brief Mount a ZMS file system onto the device specified in @p fs.
+ * + * @param fs Pointer to file system + * @retval 0 Success + * @retval -ERRNO errno code if error + */ +int zms_mount(struct zms_fs *fs); + +/** + * @brief Clear the ZMS file system from device. + * + * @param fs Pointer to file system + * @retval 0 Success + * @retval -ERRNO errno code if error + */ +int zms_clear(struct zms_fs *fs); + +/** + * @brief Write an entry to the file system. + * + * @note When @p len parameter is equal to @p 0 then entry is effectively removed (it is + * equivalent to calling of zms_delete). It is not possible to distinguish between a deleted + * entry and an entry with data of length 0. + * + * @param fs Pointer to file system + * @param id Id of the entry to be written + * @param data Pointer to the data to be written + * @param len Number of bytes to be written (maximum 64 KB) + * + * @return Number of bytes written. On success, it will be equal to the number of bytes requested + * to be written. When a rewrite of the same data already stored is attempted, nothing is written + * to flash, thus 0 is returned. On error, returns negative value of errno.h defined error codes. + */ +ssize_t zms_write(struct zms_fs *fs, uint32_t id, const void *data, size_t len); + +/** + * @brief Delete an entry from the file system + * + * @param fs Pointer to file system + * @param id Id of the entry to be deleted + * @retval 0 Success + * @retval -ERRNO errno code if error + */ +int zms_delete(struct zms_fs *fs, uint32_t id); + +/** + * @brief Read an entry from the file system. + * + * @param fs Pointer to file system + * @param id Id of the entry to be read + * @param data Pointer to data buffer + * @param len Number of bytes to be read (or size of the allocated read buffer) + * + * @return Number of bytes read. On success, it will be equal to the number of bytes requested + * to be read. When the return value is less than the number of bytes requested to read this + * indicates that ATE contain less data than requested. On error, returns negative value of + * errno.h defined error codes. + */ +ssize_t zms_read(struct zms_fs *fs, uint32_t id, void *data, size_t len); + +/** + * @brief Read a history entry from the file system. + * + * @param fs Pointer to file system + * @param id Id of the entry to be read + * @param data Pointer to data buffer + * @param len Number of bytes to be read + * @param cnt History counter: 0: latest entry, 1: one before latest ... + * + * @return Number of bytes read. On success, it will be equal to the number of bytes requested + * to be read. When the return value is larger than the number of bytes requested to read this + * indicates not all bytes were read, and more data is available. On error, returns negative + * value of errno.h defined error codes. + */ +ssize_t zms_read_hist(struct zms_fs *fs, uint32_t id, void *data, size_t len, uint32_t cnt); + +/** + * @brief Gets the data size that is stored in an entry with a given id + * + * @param fs Pointer to file system + * @param id Id of the entry that we want to get its data length + * + * @return Data length contained in the ATE. On success, it will be equal to the number of bytes + * in the ATE. On error, returns negative value of errno.h defined error codes. + */ +ssize_t zms_get_data_length(struct zms_fs *fs, uint32_t id); +/** + * @brief Calculate the available free space in the file system. + * + * @param fs Pointer to file system + * + * @return Number of bytes free. On success, it will be equal to the number of bytes that can + * still be written to the file system. 
+ * Calculating the free space is a time consuming operation, especially on SPI flash.
+ * On error, returns negative value of errno.h defined error codes.
+ */
+ssize_t zms_calc_free_space(struct zms_fs *fs);
+
+/**
+ * @brief Tell how much contiguous free space remains in the currently active ZMS sector.
+ *
+ * @param fs Pointer to the file system.
+ *
+ * @return Number of free bytes.
+ */
+size_t zms_sector_max_data_size(struct zms_fs *fs);
+
+/**
+ * @brief Close the currently active sector and switch to the next one.
+ *
+ * @note The garbage collector is called on the new sector.
+ *
+ * @warning This routine is made available for specific use cases.
+ * It collides with the ZMS goal of avoiding any unnecessary flash erase operations.
+ * Using this routine extensively can result in premature failure of the flash device.
+ *
+ * @param fs Pointer to the file system.
+ *
+ * @return 0 on success. On error, returns negative value of errno.h defined error codes.
+ */
+int zms_sector_use_next(struct zms_fs *fs);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ZEPHYR_INCLUDE_FS_ZMS_H_ */
diff --git a/samples/subsys/fs/zms/CMakeLists.txt b/samples/subsys/fs/zms/CMakeLists.txt
new file mode 100644
index 00000000000000..33644e1d903608
--- /dev/null
+++ b/samples/subsys/fs/zms/CMakeLists.txt
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: Apache-2.0
+
+cmake_minimum_required(VERSION 3.20.0)
+
+find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
+project(zms)
+
+
+target_sources(app PRIVATE src/main.c)
+target_include_directories(app PRIVATE ${ZEPHYR_BASE}/subsys/fs/zms)
diff --git a/samples/subsys/fs/zms/README.rst b/samples/subsys/fs/zms/README.rst
new file mode 100644
index 00000000000000..f05d1fa0838f27
--- /dev/null
+++ b/samples/subsys/fs/zms/README.rst
@@ -0,0 +1,96 @@
+.. zephyr:code-sample:: zms
+   :name: Zephyr Memory Storage (ZMS)
+   :relevant-api: zms_high_level_api
+
+   Store and retrieve data from storage using the ZMS API.
+
+Overview
+********
+The sample shows how to use ZMS to store ID/VALUE pairs and read them back.
+Deleting an ID/VALUE pair is also shown in this sample.
+
+The sample stores the following items:
+
+#. A string representing an IP address: stored at id=1, data="192.168.1.1"
+#. A binary blob representing a key/value pair: stored at id=0xbeefdead,
+   data={0xDE, 0xAD, 0xBE, 0xEF, 0xDE, 0xAD, 0xBE, 0xEF}
+#. A 32-bit variable: stored at id=2, data=cnt
+#. A long set of data (128 bytes)
+
+A loop is executed in which we mount the storage system and then write the whole set of data.
+
+Every DELETE_ITERATION period, we delete the whole set of data and verify that it has been
+deleted. We also generate incremented ID/value pairs and store them until the storage is full,
+then we delete them and verify that the storage is empty.
+
+Requirements
+************
+
+* A board with flash support or the native_sim target
+
+Building and Running
+********************
+
+This sample can be found under :zephyr_file:`samples/subsys/fs/zms` in the Zephyr tree.
+
+The sample can be built for several platforms, but for the moment it has been tested only on
+the native_sim target.
+
+.. zephyr-app-commands::
+   :zephyr-app: samples/subsys/fs/zms
+   :goals: build
+   :compact:
+
+After running the generated image on a native_sim target, the output on the console shows the
+multiple iterations of read/write/delete executed.
+
+Sample Output
+=============
+
+.. 
code-block:: console + + *** Booting Zephyr OS build v3.7.0-2383-g624f75400242 *** + [00:00:00.000,000] fs_zms: 3 Sectors of 4096 bytes + [00:00:00.000,000] fs_zms: alloc wra: 0, fc0 + [00:00:00.000,000] fs_zms: data wra: 0, 0 + ITERATION: 0 + Adding IP_ADDRESS 172.16.254.1 at id 1 + Adding key/value at id beefdead + Adding counter at id 2 + Adding Longarray at id 3 + [00:00:00.000,000] fs_zms: 3 Sectors of 4096 bytes + [00:00:00.000,000] fs_zms: alloc wra: 0, f80 + [00:00:00.000,000] fs_zms: data wra: 0, 8c + ITERATION: 1 + ID: 1, IP Address: 172.16.254.1 + Adding IP_ADDRESS 172.16.254.1 at id 1 + Id: beefdead, Key: de ad be ef de ad be ef + Adding key/value at id beefdead + Id: 2, loop_cnt: 0 + Adding counter at id 2 + Id: 3, Longarray: 0 1 2 3 4 5 6 7 8 9 a b c d e f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 51 52 53 5 + 4 55 56 57 58 59 5a 5b 5c 5d 5e 5f 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f + Adding Longarray at id 3 + . + . + . + . + . + . + [00:00:00.000,000] fs_zms: 3 Sectors of 4096 bytes + [00:00:00.000,000] fs_zms: alloc wra: 0, f40 + [00:00:00.000,000] fs_zms: data wra: 0, 80 + ITERATION: 299 + ID: 1, IP Address: 172.16.254.1 + Adding IP_ADDRESS 172.16.254.1 at id 1 + Id: beefdead, Key: de ad be ef de ad be ef + Adding key/value at id beefdead + Id: 2, loop_cnt: 298 + Adding counter at id 2 + Id: 3, Longarray: 0 1 2 3 4 5 6 7 8 9 a b c d e f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 51 52 53 5 + 4 55 56 57 58 59 5a 5b 5c 5d 5e 5f 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f + Adding Longarray at id 3 + Memory is full let's delete all items + Free space in storage is 8064 bytes + Sample code finished Successfully diff --git a/samples/subsys/fs/zms/prj.conf b/samples/subsys/fs/zms/prj.conf new file mode 100644 index 00000000000000..343c5021899e8a --- /dev/null +++ b/samples/subsys/fs/zms/prj.conf @@ -0,0 +1,5 @@ +CONFIG_FLASH=y +CONFIG_FLASH_MAP=y + +CONFIG_ZMS=y +CONFIG_LOG=y diff --git a/samples/subsys/fs/zms/sample.yaml b/samples/subsys/fs/zms/sample.yaml new file mode 100644 index 00000000000000..802dabcf0f1182 --- /dev/null +++ b/samples/subsys/fs/zms/sample.yaml @@ -0,0 +1,10 @@ +sample: + name: ZMS Sample + +tests: + sample.zms.basic: + tags: zms + depends_on: zms + platform_allow: + - qemu_x86 + - native_posix diff --git a/samples/subsys/fs/zms/src/main.c b/samples/subsys/fs/zms/src/main.c new file mode 100644 index 00000000000000..a2166392724c91 --- /dev/null +++ b/samples/subsys/fs/zms/src/main.c @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2024 BayLibre SAS + * + * SPDX-License-Identifier: Apache-2.0 + * + * ZMS Sample for Zephyr using high level API. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +static struct zms_fs fs; + +#define ZMS_PARTITION storage_partition +#define ZMS_PARTITION_DEVICE FIXED_PARTITION_DEVICE(ZMS_PARTITION) +#define ZMS_PARTITION_OFFSET FIXED_PARTITION_OFFSET(ZMS_PARTITION) + +#define IP_ADDRESS_ID 1 +#define KEY_VALUE_ID 0xbeefdead +#define CNT_ID 2 +#define LONG_DATA_ID 3 + +#define MAX_ITERATIONS 300 +#define DELETE_ITERATION 10 + +static int delete_and_verify_items(struct zms_fs *fs, uint32_t id) +{ + int rc = 0; + + rc = zms_delete(fs, id); + if (rc) { + goto error1; + } + rc = zms_get_data_length(fs, id); + if (rc > 0) { + goto error2; + } + + return 0; +error1: + printk("Error while deleting item rc=%d\n", rc); + return rc; +error2: + printk("Error, Delete failed item should not be present\n"); + return -1; +} + +static int delete_basic_items(struct zms_fs *fs) +{ + int rc = 0; + + rc = delete_and_verify_items(fs, IP_ADDRESS_ID); + if (rc) { + printk("Error while deleting item %x rc=%d\n", IP_ADDRESS_ID, rc); + return rc; + } + rc = delete_and_verify_items(fs, KEY_VALUE_ID); + if (rc) { + printk("Error while deleting item %x rc=%d\n", KEY_VALUE_ID, rc); + return rc; + } + rc = delete_and_verify_items(fs, CNT_ID); + if (rc) { + printk("Error while deleting item %x rc=%d\n", CNT_ID, rc); + return rc; + } + rc = delete_and_verify_items(fs, LONG_DATA_ID); + if (rc) { + printk("Error while deleting item %x rc=%d\n", LONG_DATA_ID, rc); + } + + return rc; +} + +int main(void) +{ + int rc = 0; + char buf[16]; + uint8_t key[8] = {0xDE, 0xAD, 0xBE, 0xEF, 0xDE, 0xAD, 0xBE, 0xEF}, longarray[128]; + uint32_t i_cnt = 0U, i; + uint32_t id = 0; + ssize_t free_space = 0; + struct flash_pages_info info; + + for (int n = 0; n < sizeof(longarray); n++) { + longarray[n] = n; + } + + /* define the zms file system by settings with: + * sector_size equal to the pagesize, + * 3 sectors + * starting at ZMS_PARTITION_OFFSET + */ + fs.flash_device = ZMS_PARTITION_DEVICE; + if (!device_is_ready(fs.flash_device)) { + printk("Storage device %s is not ready\n", fs.flash_device->name); + return 0; + } + fs.offset = ZMS_PARTITION_OFFSET; + rc = flash_get_page_info_by_offs(fs.flash_device, fs.offset, &info); + if (rc) { + printk("Unable to get page info, rc=%d\n", rc); + return 0; + } + fs.sector_size = info.size; + fs.sector_count = 3U; + + for (i = 0; i < MAX_ITERATIONS; i++) { + rc = zms_mount(&fs); + if (rc) { + printk("Storage Init failed, rc=%d\n", rc); + return 0; + } + + printk("ITERATION: %u\n", i); + /* IP_ADDRESS_ID is used to store an address, lets see if we can + * read it from flash, since we don't know the size read the + * maximum possible + */ + rc = zms_read(&fs, IP_ADDRESS_ID, &buf, sizeof(buf)); + if (rc > 0) { + /* item was found, show it */ + buf[rc] = '\0'; + printk("ID: %u, IP Address: %s\n", IP_ADDRESS_ID, buf); + } + /* Rewriting ADDRESS IP even if we found it */ + strncpy(buf, "172.16.254.1", sizeof(buf) - 1); + printk("Adding IP_ADDRESS %s at id %u\n", buf, IP_ADDRESS_ID); + rc = zms_write(&fs, IP_ADDRESS_ID, &buf, strlen(buf)); + if (rc < 0) { + printk("Error while writing Entry rc=%d\n", rc); + break; + } + + /* KEY_VALUE_ID is used to store a key/value pair , lets see if we can read + * it from storage. 
+ */ + rc = zms_read(&fs, KEY_VALUE_ID, &key, sizeof(key)); + if (rc > 0) { /* item was found, show it */ + printk("Id: %x, Key: ", KEY_VALUE_ID); + for (int n = 0; n < 8; n++) { + printk("%x ", key[n]); + } + printk("\n"); + } + /* Rewriting KEY_VALUE even if we found it */ + printk("Adding key/value at id %x\n", KEY_VALUE_ID); + rc = zms_write(&fs, KEY_VALUE_ID, &key, sizeof(key)); + if (rc < 0) { + printk("Error while writing Entry rc=%d\n", rc); + break; + } + + /* CNT_ID is used to store the loop counter, lets see + * if we can read it from storage + */ + rc = zms_read(&fs, CNT_ID, &i_cnt, sizeof(i_cnt)); + if (rc > 0) { /* item was found, show it */ + printk("Id: %d, loop_cnt: %u\n", CNT_ID, i_cnt); + if (i_cnt != (i - 1)) { + break; + } + } + printk("Adding counter at id %u\n", CNT_ID); + rc = zms_write(&fs, CNT_ID, &i, sizeof(i)); + if (rc < 0) { + printk("Error while writing Entry rc=%d\n", rc); + break; + } + + /* LONG_DATA_ID is used to store a larger dataset ,lets see if we can read + * it from flash + */ + rc = zms_read(&fs, LONG_DATA_ID, &longarray, sizeof(longarray)); + if (rc > 0) { + /* item was found, show it */ + printk("Id: %d, Longarray: ", LONG_DATA_ID); + for (int n = 0; n < sizeof(longarray); n++) { + printk("%x ", longarray[n]); + } + printk("\n"); + } + /* Rewrite the entry even if we found it */ + printk("Adding Longarray at id %d\n", LONG_DATA_ID); + rc = zms_write(&fs, LONG_DATA_ID, &longarray, sizeof(longarray)); + if (rc < 0) { + printk("Error while writing Entry rc=%d\n", rc); + break; + } + + /* Each DELETE_ITERATION delete all basic items */ + if (!(i % DELETE_ITERATION) && (i)) { + rc = delete_basic_items(&fs); + if (rc) { + break; + } + } + } + + if (i != MAX_ITERATIONS) { + printk("Error: Something went wrong at iteration %u rc=%d\n", i - 1, rc); + return 0; + } + + while (1) { + /* fill all storage */ + rc = zms_write(&fs, id, &id, sizeof(uint32_t)); + if (rc < 0) { + break; + } + id++; + } + + if (rc == -ENOSPC) { + /* Calculate free space and verify that it is 0 */ + free_space = zms_calc_free_space(&fs); + if (free_space < 0) { + printk("Error while computing free space, rc=%d\n", free_space); + return 0; + } + if (free_space > 0) { + printk("Error: free_space should be 0, computed %u\n", free_space); + return 0; + } + printk("Memory is full let's delete all items\n"); + + /* Now delete all previously written items */ + for (uint32_t n = 0; n < id; n++) { + rc = delete_and_verify_items(&fs, n); + if (rc) { + printk("Error deleting at id %u\n", n); + return 0; + } + } + rc = delete_basic_items(&fs); + if (rc) { + printk("Error deleting basic items\n"); + return 0; + } + } + + /* + * Let's compute free space in storage. 
But before doing that let's Garbage collect + * all sectors where we deleted all entries and then compute the free space + */ + for (uint32_t i = 0; i < fs.sector_count; i++) { + rc = zms_sector_use_next(&fs); + if (rc) { + printk("Error while changing sector rc=%d\n", rc); + } + } + free_space = zms_calc_free_space(&fs); + if (free_space < 0) { + printk("Error while computing free space, rc=%d\n", free_space); + return 0; + } + printk("Free space in storage is %u bytes\n", free_space); + printk("Sample code finished Successfully\n"); + + return 0; +} diff --git a/subsys/fs/CMakeLists.txt b/subsys/fs/CMakeLists.txt index 574f9b26c923a4..15f97649a13d56 100644 --- a/subsys/fs/CMakeLists.txt +++ b/subsys/fs/CMakeLists.txt @@ -27,6 +27,7 @@ endif() add_subdirectory_ifdef(CONFIG_FCB ./fcb) add_subdirectory_ifdef(CONFIG_NVS ./nvs) +add_subdirectory_ifdef(CONFIG_ZMS ./zms) if(CONFIG_FUSE_FS_ACCESS) zephyr_library_named(FS_FUSE) diff --git a/subsys/fs/Kconfig b/subsys/fs/Kconfig index af92a9bab9a9eb..83e4185ea7514f 100644 --- a/subsys/fs/Kconfig +++ b/subsys/fs/Kconfig @@ -110,5 +110,6 @@ endif # FILE_SYSTEM rsource "fcb/Kconfig" rsource "nvs/Kconfig" +rsource "zms/Kconfig" endmenu diff --git a/subsys/fs/zms/CMakeLists.txt b/subsys/fs/zms/CMakeLists.txt new file mode 100644 index 00000000000000..b6db8a3f57fa9a --- /dev/null +++ b/subsys/fs/zms/CMakeLists.txt @@ -0,0 +1,3 @@ +#SPDX-License-Identifier: Apache-2.0 + +zephyr_sources(zms.c) diff --git a/subsys/fs/zms/Kconfig b/subsys/fs/zms/Kconfig new file mode 100644 index 00000000000000..330ef11155ccc7 --- /dev/null +++ b/subsys/fs/zms/Kconfig @@ -0,0 +1,57 @@ +#Zephyr Memory Storage ZMS + +#Copyright (c) 2024 BayLibre SAS + +#SPDX-License-Identifier: Apache-2.0 + +config ZMS + bool "Zephyr Memory Storage" + select CRC + help + Enable support of Zephyr Memory Storage. + +if ZMS + +config ZMS_LOOKUP_CACHE + bool "ZMS lookup cache" + help + Enable ZMS cache to reduce the ZMS data lookup time. + Each cache entry holds an address of the most recent allocation + table entry (ATE) for all ZMS IDs that fall into that cache position. + +config ZMS_LOOKUP_CACHE_SIZE + int "ZMS Storage lookup cache size" + default 128 + range 1 65536 + depends on ZMS_LOOKUP_CACHE + help + Number of entries in ZMS lookup cache. + It is recommended that it should be a power of 2. + Every additional entry in cache will add 8 bytes in RAM + +config ZMS_DATA_CRC + bool "ZMS DATA CRC" + help + Enables DATA CRC + +config ZMS_CUSTOM_BLOCK_SIZE + bool "Custom buffer size used by ZMS for reads and writes" + help + ZMS uses internal buffers to read/write and compare stored data. + Increasing the size of these buffers should be done carefully in order to not + overflow the stack. 
+ Increasing this buffer means as well that ZMS could work with storage devices + that have larger write-block-size which decreases ZMS performance + +config ZMS_MAX_BLOCK_SIZE + int "ZMS internal buffer size" + default 32 + depends on ZMS_CUSTOM_BLOCK_SIZE + help + Changes the internal buffer size of ZMS + +module = ZMS +module-str = zms +source "subsys/logging/Kconfig.template.log_config" + +endif # ZMS diff --git a/subsys/fs/zms/zms.c b/subsys/fs/zms/zms.c new file mode 100644 index 00000000000000..219303131093be --- /dev/null +++ b/subsys/fs/zms/zms.c @@ -0,0 +1,1752 @@ +/* ZMS: Zephyr Memory Storage + * + * Copyright (c) 2024 BayLibre SAS + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include +#include "zms_priv.h" + +#include +LOG_MODULE_REGISTER(fs_zms, CONFIG_ZMS_LOG_LEVEL); + +static int zms_prev_ate(struct zms_fs *fs, uint64_t *addr, struct zms_ate *ate); +static int zms_ate_valid(struct zms_fs *fs, const struct zms_ate *entry); +static int zms_get_sector_cycle(struct zms_fs *fs, uint64_t addr, uint8_t *cycle_cnt); +static int zms_get_sector_header(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate, + struct zms_ate *close_ate); +static int zms_ate_valid_different_sector(struct zms_fs *fs, const struct zms_ate *entry, + uint8_t cycle_cnt); + +#ifdef CONFIG_ZMS_LOOKUP_CACHE + +static inline size_t zms_lookup_cache_pos(uint32_t id) +{ + uint32_t hash; + + /* 32-bit integer hash function found by https://github.com/skeeto/hash-prospector. */ + hash = id; + hash ^= hash >> 16; + hash *= 0x7feb352dU; + hash ^= hash >> 15; + hash *= 0x846ca68bU; + hash ^= hash >> 16; + + return hash % CONFIG_ZMS_LOOKUP_CACHE_SIZE; +} + +static int zms_lookup_cache_rebuild(struct zms_fs *fs) +{ + int rc, previous_sector_num = ZMS_INVALID_SECTOR_NUM; + uint64_t addr, ate_addr; + uint64_t *cache_entry; + uint8_t current_cycle; + struct zms_ate ate; + + memset(fs->lookup_cache, 0xff, sizeof(fs->lookup_cache)); + addr = fs->ate_wra; + + while (true) { + /* Make a copy of 'addr' as it will be advanced by zms_prev_ate() */ + ate_addr = addr; + rc = zms_prev_ate(fs, &addr, &ate); + + if (rc) { + return rc; + } + + cache_entry = &fs->lookup_cache[zms_lookup_cache_pos(ate.id)]; + + if (ate.id != ZMS_HEAD_ID && *cache_entry == ZMS_LOOKUP_CACHE_NO_ADDR) { + /* read the ate cycle only when we change the sector + * or if it is the first read + */ + if (SECTOR_NUM(ate_addr) != previous_sector_num) { + rc = zms_get_sector_cycle(fs, ate_addr, ¤t_cycle); + if (rc == -ENOENT) { + /* sector never used */ + current_cycle = 0; + } else if (rc) { + /* bad flash read */ + return rc; + } + } + if (zms_ate_valid_different_sector(fs, &ate, current_cycle)) { + *cache_entry = ate_addr; + } + previous_sector_num = SECTOR_NUM(ate_addr); + } + + if (addr == fs->ate_wra) { + break; + } + } + + return 0; +} + +static void zms_lookup_cache_invalidate(struct zms_fs *fs, uint32_t sector) +{ + uint64_t *cache_entry = fs->lookup_cache; + uint64_t *const cache_end = &fs->lookup_cache[CONFIG_ZMS_LOOKUP_CACHE_SIZE]; + + for (; cache_entry < cache_end; ++cache_entry) { + if (SECTOR_NUM(*cache_entry) == sector) { + *cache_entry = ZMS_LOOKUP_CACHE_NO_ADDR; + } + } +} + +#endif /* CONFIG_ZMS_LOOKUP_CACHE */ + +/* Helper to compute offset given the address */ +static inline off_t zms_addr_to_offset(struct zms_fs *fs, uint64_t addr) +{ + return fs->offset + (fs->sector_size * SECTOR_NUM(addr)) + SECTOR_OFFSET(addr); +} + +/* zms_al_size returns size aligned to fs->write_block_size */ +static 
inline size_t zms_al_size(struct zms_fs *fs, size_t len) +{ + size_t write_block_size = fs->flash_parameters->write_block_size; + + if (write_block_size <= 1U) { + return len; + } + return (len + (write_block_size - 1U)) & ~(write_block_size - 1U); +} + +/* Helper to get empty ATE address */ +static inline uint64_t zms_empty_ate_addr(struct zms_fs *fs, uint64_t addr) +{ + return (addr & ADDR_SECT_MASK) + fs->sector_size - fs->ate_size; +} + +/* Helper to get close ATE address */ +static inline uint64_t zms_close_ate_addr(struct zms_fs *fs, uint64_t addr) +{ + return (addr & ADDR_SECT_MASK) + fs->sector_size - 2 * fs->ate_size; +} + +/* Aligned memory write */ +static int zms_flash_al_wrt(struct zms_fs *fs, uint64_t addr, const void *data, size_t len) +{ + const uint8_t *data8 = (const uint8_t *)data; + int rc = 0; + off_t offset; + size_t blen; + uint8_t buf[ZMS_BLOCK_SIZE]; + + if (!len) { + /* Nothing to write, avoid changing the flash protection */ + return 0; + } + + offset = zms_addr_to_offset(fs, addr); + + blen = len & ~(fs->flash_parameters->write_block_size - 1U); + if (blen > 0) { + rc = flash_write(fs->flash_device, offset, data8, blen); + if (rc) { + /* flash write error */ + goto end; + } + len -= blen; + offset += blen; + data8 += blen; + } + if (len) { + memcpy(buf, data8, len); + (void)memset(buf + len, fs->flash_parameters->erase_value, + fs->flash_parameters->write_block_size - len); + + rc = flash_write(fs->flash_device, offset, buf, + fs->flash_parameters->write_block_size); + } + +end: + return rc; +} + +/* basic flash read from zms address */ +static int zms_flash_rd(struct zms_fs *fs, uint64_t addr, void *data, size_t len) +{ + off_t offset; + + offset = zms_addr_to_offset(fs, addr); + + return flash_read(fs->flash_device, offset, data, len); +} + +/* allocation entry write */ +static int zms_flash_ate_wrt(struct zms_fs *fs, const struct zms_ate *entry) +{ + int rc; + + rc = zms_flash_al_wrt(fs, fs->ate_wra, entry, sizeof(struct zms_ate)); + if (rc) { + goto end; + } +#ifdef CONFIG_ZMS_LOOKUP_CACHE + /* 0xFFFFFFFF is a special-purpose identifier. 
Exclude it from the cache */ + if (entry->id != ZMS_HEAD_ID) { + fs->lookup_cache[zms_lookup_cache_pos(entry->id)] = fs->ate_wra; + } +#endif + fs->ate_wra -= zms_al_size(fs, sizeof(struct zms_ate)); +end: + return rc; +} + +/* data write */ +static int zms_flash_data_wrt(struct zms_fs *fs, const void *data, size_t len) +{ + int rc; + + rc = zms_flash_al_wrt(fs, fs->data_wra, data, len); + if (rc < 0) { + return rc; + } + fs->data_wra += zms_al_size(fs, len); + + return 0; +} + +/* flash ate read */ +static int zms_flash_ate_rd(struct zms_fs *fs, uint64_t addr, struct zms_ate *entry) +{ + return zms_flash_rd(fs, addr, entry, sizeof(struct zms_ate)); +} + +/* zms_flash_block_cmp compares the data in flash at addr to data + * in blocks of size ZMS_BLOCK_SIZE aligned to fs->write_block_size + * returns 0 if equal, 1 if not equal, errcode if error + */ +static int zms_flash_block_cmp(struct zms_fs *fs, uint64_t addr, const void *data, size_t len) +{ + const uint8_t *data8 = (const uint8_t *)data; + int rc; + size_t bytes_to_cmp, block_size; + uint8_t buf[ZMS_BLOCK_SIZE]; + + block_size = ZMS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U); + + while (len) { + bytes_to_cmp = MIN(block_size, len); + rc = zms_flash_rd(fs, addr, buf, bytes_to_cmp); + if (rc) { + return rc; + } + rc = memcmp(data8, buf, bytes_to_cmp); + if (rc) { + return 1; + } + len -= bytes_to_cmp; + addr += bytes_to_cmp; + data8 += bytes_to_cmp; + } + return 0; +} + +/* zms_flash_cmp_const compares the data in flash at addr to a constant + * value. returns 0 if all data in flash is equal to value, 1 if not equal, + * errcode if error + */ +static int zms_flash_cmp_const(struct zms_fs *fs, uint64_t addr, uint8_t value, size_t len) +{ + int rc; + size_t bytes_to_cmp, block_size; + uint8_t cmp[ZMS_BLOCK_SIZE]; + + block_size = ZMS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U); + + (void)memset(cmp, value, block_size); + while (len) { + bytes_to_cmp = MIN(block_size, len); + rc = zms_flash_block_cmp(fs, addr, cmp, bytes_to_cmp); + if (rc) { + return rc; + } + len -= bytes_to_cmp; + addr += bytes_to_cmp; + } + return 0; +} + +/* flash block move: move a block at addr to the current data write location + * and updates the data write location. + */ +static int zms_flash_block_move(struct zms_fs *fs, uint64_t addr, size_t len) +{ + int rc; + size_t bytes_to_copy, block_size; + uint8_t buf[ZMS_BLOCK_SIZE]; + + block_size = ZMS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U); + + while (len) { + bytes_to_copy = MIN(block_size, len); + rc = zms_flash_rd(fs, addr, buf, bytes_to_copy); + if (rc) { + return rc; + } + rc = zms_flash_data_wrt(fs, buf, bytes_to_copy); + if (rc) { + return rc; + } + len -= bytes_to_copy; + addr += bytes_to_copy; + } + return 0; +} + +/* erase a sector and verify erase was OK. + * return 0 if OK, errorcode on error. 
+ */ +static int zms_flash_erase_sector(struct zms_fs *fs, uint64_t addr) +{ + int rc; + off_t offset; + bool ebw_required = + flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT; + + if (!ebw_required) { + /* Do nothing for devices that do not have erase capability */ + return 0; + } + + addr &= ADDR_SECT_MASK; + offset = zms_addr_to_offset(fs, addr); + + LOG_DBG("Erasing flash at offset 0x%lx ( 0x%llx ), len %u", (long)offset, addr, + fs->sector_size); + +#ifdef CONFIG_ZMS_LOOKUP_CACHE + zms_lookup_cache_invalidate(fs, SECTOR_NUM(addr)); +#endif + rc = flash_erase(fs->flash_device, offset, fs->sector_size); + + if (rc) { + return rc; + } + + if (zms_flash_cmp_const(fs, addr, fs->flash_parameters->erase_value, fs->sector_size)) { + LOG_ERR("Failure while erasing the sector at offset 0x%lx", (long)offset); + rc = -ENXIO; + } + + return rc; +} + +/* crc update on allocation entry */ +static void zms_ate_crc8_update(struct zms_ate *entry) +{ + uint8_t crc8; + + /* crc8 field is the first element of the structure, do not include it */ + crc8 = crc8_ccitt(0xff, (uint8_t *)entry + SIZEOF_FIELD(struct zms_ate, crc8), + sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8)); + entry->crc8 = crc8; +} + +/* crc check on allocation entry + * returns 0 if OK, 1 on crc fail + */ +static int zms_ate_crc8_check(const struct zms_ate *entry) +{ + uint8_t crc8; + + /* crc8 field is the first element of the structure, do not include it */ + crc8 = crc8_ccitt(0xff, (uint8_t *)entry + SIZEOF_FIELD(struct zms_ate, crc8), + sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8)); + if (crc8 == entry->crc8) { + return 0; + } + + return 1; +} + +/* zms_ate_valid validates an ate: + * return 1 if crc8 and cycle_cnt valid, + * 0 otherwise + */ +static int zms_ate_valid(struct zms_fs *fs, const struct zms_ate *entry) +{ + if ((fs->sector_cycle != entry->cycle_cnt) || zms_ate_crc8_check(entry)) { + return 0; + } + + return 1; +} + +/* zms_ate_valid_different_sector validates an ate that is in a different + * sector than the active one. 
It takes as argument the cycle_cnt of the + * sector where the ATE to be validated is stored + * return 1 if crc8 and cycle_cnt are valid, + * 0 otherwise + */ +static int zms_ate_valid_different_sector(struct zms_fs *fs, const struct zms_ate *entry, + uint8_t cycle_cnt) +{ + if ((cycle_cnt != entry->cycle_cnt) || zms_ate_crc8_check(entry)) { + return 0; + } + + return 1; +} + +static inline int zms_get_cycle_on_sector_change(struct zms_fs *fs, uint64_t addr, + int previous_sector_num, uint8_t *cycle_cnt) +{ + int rc; + + /* read the ate cycle only when we change the sector + * or if it is the first read + */ + if (SECTOR_NUM(addr) != previous_sector_num) { + rc = zms_get_sector_cycle(fs, addr, cycle_cnt); + if (rc == -ENOENT) { + /* sector never used */ + *cycle_cnt = 0; + } else if (rc) { + /* bad flash read */ + return rc; + } + } + + return 0; +} + +/* zms_close_ate_valid validates an sector close ate: a valid sector close ate: + * - valid ate + * - len = 0 and id = ZMS_HEAD_ID + * - offset points to location at ate multiple from sector size + * return true if valid, false otherwise + */ +static bool zms_close_ate_valid(struct zms_fs *fs, const struct zms_ate *entry) +{ + return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && (!entry->len) && + (entry->id == ZMS_HEAD_ID) && !((fs->sector_size - entry->offset) % fs->ate_size)); +} + +/* zms_empty_ate_valid validates an sector empty ate: a valid sector empty ate: + * - valid ate + * - len = 0xffff and id = 0xffffffff + * return true if valid, false otherwise + */ +static bool zms_empty_ate_valid(struct zms_fs *fs, const struct zms_ate *entry) +{ + return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && + (entry->len == 0xffff) && (entry->id == ZMS_HEAD_ID)); +} + +/* zms_gc_done_ate_valid validates a garbage collector done ATE + * Valid gc_done_ate: + * - valid ate + * - len = 0 + * - id = 0xffffffff + * return true if valid, false otherwise + */ +static bool zms_gc_done_ate_valid(struct zms_fs *fs, const struct zms_ate *entry) +{ + return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && (!entry->len) && + (entry->id == ZMS_HEAD_ID)); +} + +/* Read empty and close ATE of the sector where belongs address "addr" and + * validates that the sector is closed. + * retval: 0 if sector is not close + * retval: 1 is sector is closed + * retval: < 0 if read of the header failed. 
+ */ +static int zms_validate_closed_sector(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate, + struct zms_ate *close_ate) +{ + int rc; + + /* read the header ATEs */ + rc = zms_get_sector_header(fs, addr, empty_ate, close_ate); + if (rc) { + return rc; + } + + if (zms_empty_ate_valid(fs, empty_ate) && zms_close_ate_valid(fs, close_ate) && + (empty_ate->cycle_cnt == close_ate->cycle_cnt)) { + /* Closed sector validated */ + return 1; + } + + return 0; +} + +/* store an entry in flash */ +static int zms_flash_write_entry(struct zms_fs *fs, uint32_t id, const void *data, size_t len) +{ + int rc; + struct zms_ate entry; + + /* Initialize all members to 0 */ + memset(&entry, 0, sizeof(struct zms_ate)); + + entry.id = id; + entry.len = (uint16_t)len; + entry.cycle_cnt = fs->sector_cycle; + + if (len > ZMS_DATA_IN_ATE_SIZE) { + /* only compute CRC if len is greater than 8 bytes */ + if (IS_ENABLED(CONFIG_ZMS_DATA_CRC)) { + entry.data_crc = crc32_ieee(data, len); + } + entry.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra); + } else if ((len > 0) && (len <= ZMS_DATA_IN_ATE_SIZE)) { + /* Copy data into entry for small data ( < 8B) */ + memcpy(&entry.data, data, len); + } + + zms_ate_crc8_update(&entry); + + if (len > ZMS_DATA_IN_ATE_SIZE) { + rc = zms_flash_data_wrt(fs, data, len); + if (rc) { + return rc; + } + } + + rc = zms_flash_ate_wrt(fs, &entry); + if (rc) { + return rc; + } + + return 0; +} + +/* end of flash routines */ + +/* Search for the last valid ATE written in a sector and also update data write address + */ +static int zms_recover_last_ate(struct zms_fs *fs, uint64_t *addr, uint64_t *data_wra) +{ + uint64_t data_end_addr, ate_end_addr; + struct zms_ate end_ate; + int rc; + + LOG_DBG("Recovering last ate from sector %llu", SECTOR_NUM(*addr)); + + /* skip close and empty ATE */ + *addr -= 2 * fs->ate_size; + + ate_end_addr = *addr; + data_end_addr = *addr & ADDR_SECT_MASK; + /* Initialize the data_wra to the first address of the sector */ + *data_wra = data_end_addr; + + while (ate_end_addr > data_end_addr) { + rc = zms_flash_ate_rd(fs, ate_end_addr, &end_ate); + if (rc) { + return rc; + } + if (zms_ate_valid(fs, &end_ate)) { + /* found a valid ate, update data_end_addr and *addr */ + data_end_addr &= ADDR_SECT_MASK; + if (end_ate.len > ZMS_DATA_IN_ATE_SIZE) { + data_end_addr += end_ate.offset + zms_al_size(fs, end_ate.len); + *data_wra = data_end_addr; + } + *addr = ate_end_addr; + } + ate_end_addr -= fs->ate_size; + } + + return 0; +} + +/* compute previous addr of ATE */ +static int zms_compute_prev_addr(struct zms_fs *fs, uint64_t *addr) +{ + int sec_closed; + struct zms_ate empty_ate, close_ate; + + *addr += fs->ate_size; + if ((SECTOR_OFFSET(*addr)) != (fs->sector_size - 2 * fs->ate_size)) { + return 0; + } + + /* last ate in sector, do jump to previous sector */ + if (SECTOR_NUM(*addr) == 0U) { + *addr += ((uint64_t)(fs->sector_count - 1) << ADDR_SECT_SHIFT); + } else { + *addr -= (1ULL << ADDR_SECT_SHIFT); + } + + /* verify if the sector is closed */ + sec_closed = zms_validate_closed_sector(fs, *addr, &empty_ate, &close_ate); + if (sec_closed < 0) { + return sec_closed; + } + + /* Non Closed Sector */ + if (!sec_closed) { + /* at the end of filesystem */ + *addr = fs->ate_wra; + return 0; + } + + /* Update the address here because the header ATEs are valid.*/ + (*addr) &= ADDR_SECT_MASK; + (*addr) += close_ate.offset; + + return 0; +} + +/* walking through allocation entry list, from newest to oldest entries + * read ate from addr, modify addr to the previous ate + 
*/ +static int zms_prev_ate(struct zms_fs *fs, uint64_t *addr, struct zms_ate *ate) +{ + int rc; + + rc = zms_flash_ate_rd(fs, *addr, ate); + if (rc) { + return rc; + } + + return zms_compute_prev_addr(fs, addr); +} + +static void zms_sector_advance(struct zms_fs *fs, uint64_t *addr) +{ + *addr += (1ULL << ADDR_SECT_SHIFT); + if ((*addr >> ADDR_SECT_SHIFT) == fs->sector_count) { + *addr -= ((uint64_t)fs->sector_count << ADDR_SECT_SHIFT); + } +} + +/* allocation entry close (this closes the current sector) by writing offset + * of last ate to the sector end. + */ +static int zms_sector_close(struct zms_fs *fs) +{ + int rc; + struct zms_ate close_ate, garbage_ate; + + close_ate.id = ZMS_HEAD_ID; + close_ate.len = 0U; + close_ate.offset = (uint32_t)SECTOR_OFFSET(fs->ate_wra + fs->ate_size); + close_ate.metadata = 0xffffffff; + close_ate.cycle_cnt = fs->sector_cycle; + + /* When we close the sector, we must write all non used ATE with + * a non valid (Junk) ATE. + * This is needed to avoid some corner cases where some ATEs are + * not overwritten and become valid when the cycle counter wrap again + * to the same cycle counter of the old ATE. + * Example : + * - An ATE.cycl_cnt == 0 is written as last ATE of the sector + - This ATE was never overwritten in the next 255 cycles because of + large data size + - Next 256th cycle the leading cycle_cnt is 0, this ATE becomes + valid even if it is not the case. + */ + memset(&garbage_ate, fs->flash_parameters->erase_value, sizeof(garbage_ate)); + while (SECTOR_OFFSET(fs->ate_wra) && (fs->ate_wra >= fs->data_wra)) { + rc = zms_flash_ate_wrt(fs, &garbage_ate); + if (rc) { + return rc; + } + } + + fs->ate_wra = zms_close_ate_addr(fs, fs->ate_wra); + + zms_ate_crc8_update(&close_ate); + + (void)zms_flash_ate_wrt(fs, &close_ate); + + zms_sector_advance(fs, &fs->ate_wra); + + rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle); + if (rc == -ENOENT) { + /* sector never used */ + fs->sector_cycle = 0; + } else if (rc) { + /* bad flash read */ + return rc; + } + + fs->data_wra = fs->ate_wra & ADDR_SECT_MASK; + + return 0; +} + +static int zms_add_gc_done_ate(struct zms_fs *fs) +{ + struct zms_ate gc_done_ate; + + LOG_DBG("Adding gc done ate at %llx", fs->ate_wra); + gc_done_ate.id = ZMS_HEAD_ID; + gc_done_ate.len = 0U; + gc_done_ate.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra); + gc_done_ate.metadata = 0xffffffff; + gc_done_ate.cycle_cnt = fs->sector_cycle; + + zms_ate_crc8_update(&gc_done_ate); + + return zms_flash_ate_wrt(fs, &gc_done_ate); +} + +static int zms_add_empty_ate(struct zms_fs *fs, uint64_t addr) +{ + struct zms_ate empty_ate; + uint8_t cycle_cnt; + int rc = 0; + uint64_t previous_ate_wra; + + addr &= ADDR_SECT_MASK; + + LOG_DBG("Adding empty ate at %llx", (uint64_t)(addr + fs->sector_size - fs->ate_size)); + empty_ate.id = ZMS_HEAD_ID; + empty_ate.len = 0xffff; + empty_ate.offset = 0U; + empty_ate.metadata = + FIELD_PREP(ZMS_MAGIC_NUMBER_MASK, ZMS_MAGIC_NUMBER) | ZMS_DEFAULT_VERSION; + + rc = zms_get_sector_cycle(fs, addr, &cycle_cnt); + if (rc == -ENOENT) { + /* sector never used */ + cycle_cnt = 0; + } else if (rc) { + /* bad flash read */ + return rc; + } + + /* increase cycle counter */ + empty_ate.cycle_cnt = (cycle_cnt + 1) % BIT(8); + zms_ate_crc8_update(&empty_ate); + + /* Adding empty ate to this sector changes fs->ate_wra value + * Restore the ate_wra of the current sector after this + */ + previous_ate_wra = fs->ate_wra; + fs->ate_wra = zms_empty_ate_addr(fs, addr); + rc = zms_flash_ate_wrt(fs, &empty_ate); + if (rc) { + 
return rc; + } + fs->ate_wra = previous_ate_wra; + + return 0; +} + +static int zms_get_sector_cycle(struct zms_fs *fs, uint64_t addr, uint8_t *cycle_cnt) +{ + int rc; + struct zms_ate empty_ate; + uint64_t empty_addr; + + empty_addr = zms_empty_ate_addr(fs, addr); + + /* read the cycle counter of the current sector */ + rc = zms_flash_ate_rd(fs, empty_addr, &empty_ate); + if (rc < 0) { + /* flash error */ + return rc; + } + + if (zms_empty_ate_valid(fs, &empty_ate)) { + *cycle_cnt = empty_ate.cycle_cnt; + return 0; + } + + /* there is no empty ATE in this sector */ + return -ENOENT; +} + +static int zms_get_sector_header(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate, + struct zms_ate *close_ate) +{ + int rc; + uint64_t close_addr; + + close_addr = zms_close_ate_addr(fs, addr); + /* read the second ate in the sector to get the close ATE */ + rc = zms_flash_ate_rd(fs, close_addr, close_ate); + if (rc) { + return rc; + } + + /* read the first ate in the sector to get the empty ATE */ + rc = zms_flash_ate_rd(fs, close_addr + fs->ate_size, empty_ate); + if (rc) { + return rc; + } + + return 0; +} + +/** + * @brief Helper to find an ATE using its ID + * + * @param fs Pointer to file system + * @param id Id of the entry to be found + * @param start_addr Address from where the search will start + * @param end_addr Address where the search will stop + * @param ate pointer to the found ATE if it exists + * @param ate_addr Pointer to the address of the found ATE + * + * @retval 0 No ATE is found + * @retval 1 valid ATE with same ID found + * @retval < 0 An error happened + */ +static int zms_find_ate_with_id(struct zms_fs *fs, uint32_t id, uint64_t start_addr, + uint64_t end_addr, struct zms_ate *ate, uint64_t *ate_addr) +{ + int rc; + int previous_sector_num = ZMS_INVALID_SECTOR_NUM; + uint64_t wlk_prev_addr, wlk_addr; + int prev_found = 0; + struct zms_ate wlk_ate; + uint8_t current_cycle; + + wlk_addr = start_addr; + + do { + wlk_prev_addr = wlk_addr; + rc = zms_prev_ate(fs, &wlk_addr, &wlk_ate); + if (rc) { + return rc; + } + if (wlk_ate.id == id) { + /* read the ate cycle only when we change the sector or if it is + * the first read ( previous_sector_num == ZMS_INVALID_SECTOR_NUM). + */ + rc = zms_get_cycle_on_sector_change(fs, wlk_prev_addr, previous_sector_num, + ¤t_cycle); + if (rc) { + return rc; + } + if (zms_ate_valid_different_sector(fs, &wlk_ate, current_cycle)) { + prev_found = 1; + break; + } + previous_sector_num = SECTOR_NUM(wlk_prev_addr); + } + } while (wlk_addr != end_addr); + + *ate = wlk_ate; + *ate_addr = wlk_prev_addr; + + return prev_found; +} + +/* garbage collection: the address ate_wra has been updated to the new sector + * that has just been started. The data to gc is in the sector after this new + * sector. + */ +static int zms_gc(struct zms_fs *fs) +{ + int rc, sec_closed; + struct zms_ate close_ate, gc_ate, wlk_ate, empty_ate; + uint64_t sec_addr, gc_addr, gc_prev_addr, wlk_addr, wlk_prev_addr, data_addr, stop_addr; + uint8_t previous_cycle = 0; + + rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle); + if (rc == -ENOENT) { + /* Erase this new unused sector if needed */ + rc = zms_flash_erase_sector(fs, fs->ate_wra); + if (rc) { + return rc; + } + /* sector never used */ + rc = zms_add_empty_ate(fs, fs->ate_wra); + if (rc) { + return rc; + } + /* At this step we are sure that empty ATE exist. + * If not, then there is an I/O problem. 
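+ * (this sector was erased and a fresh empty ATE written just above, so
+ * the re-read of the cycle counter below is expected to succeed)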
+ */ + rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle); + if (rc) { + return rc; + } + } else if (rc) { + /* bad flash read */ + return rc; + } + previous_cycle = fs->sector_cycle; + + sec_addr = (fs->ate_wra & ADDR_SECT_MASK); + zms_sector_advance(fs, &sec_addr); + gc_addr = sec_addr + fs->sector_size - fs->ate_size; + + /* verify if the sector is closed */ + sec_closed = zms_validate_closed_sector(fs, gc_addr, &empty_ate, &close_ate); + if (sec_closed < 0) { + return sec_closed; + } + + /* if the sector is not closed don't do gc */ + if (!sec_closed) { + goto gc_done; + } + + /* update sector_cycle */ + fs->sector_cycle = empty_ate.cycle_cnt; + + /* stop_addr points to the first ATE before the header ATEs */ + stop_addr = gc_addr - 2 * fs->ate_size; + /* At this step empty & close ATEs are valid. + * let's start the GC + */ + gc_addr &= ADDR_SECT_MASK; + gc_addr += close_ate.offset; + + do { + gc_prev_addr = gc_addr; + rc = zms_prev_ate(fs, &gc_addr, &gc_ate); + if (rc) { + return rc; + } + + if (!zms_ate_valid(fs, &gc_ate) || !gc_ate.len) { + continue; + } + +#ifdef CONFIG_ZMS_LOOKUP_CACHE + wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(gc_ate.id)]; + + if (wlk_addr == ZMS_LOOKUP_CACHE_NO_ADDR) { + wlk_addr = fs->ate_wra; + } +#else + wlk_addr = fs->ate_wra; +#endif + + /* Initialize the wlk_prev_addr as if no previous ID will be found */ + wlk_prev_addr = gc_prev_addr; + /* Search for a previous valid ATE with the same ID. If it doesn't exist + * then wlk_prev_addr will be equal to gc_prev_addr. + */ + rc = zms_find_ate_with_id(fs, gc_ate.id, wlk_addr, fs->ate_wra, &wlk_ate, + &wlk_prev_addr); + if (rc < 0) { + return rc; + } + + /* if walk_addr has reached the same address as gc_addr, a copy is + * needed unless it is a deleted item. 
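+ * In other words, the ATE being collected is still the most recent entry
+ * for this ID; if a newer ATE with the same ID exists elsewhere, the old
+ * copy can simply be dropped.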
+ */ + if (wlk_prev_addr == gc_prev_addr) { + /* copy needed */ + LOG_DBG("Moving %d, len %d", gc_ate.id, gc_ate.len); + + if (gc_ate.len > ZMS_DATA_IN_ATE_SIZE) { + /* Copy Data only when len > 8 + * Otherwise, Data is already inside ATE + */ + data_addr = (gc_prev_addr & ADDR_SECT_MASK); + data_addr += gc_ate.offset; + gc_ate.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra); + + rc = zms_flash_block_move(fs, data_addr, gc_ate.len); + if (rc) { + return rc; + } + } + + gc_ate.cycle_cnt = previous_cycle; + zms_ate_crc8_update(&gc_ate); + rc = zms_flash_ate_wrt(fs, &gc_ate); + if (rc) { + return rc; + } + } + } while (gc_prev_addr != stop_addr); + +gc_done: + + /* restore the previous sector_cycle */ + fs->sector_cycle = previous_cycle; + + /* Write a GC_done ATE to mark the end of this operation + */ + + rc = zms_add_gc_done_ate(fs); + if (rc) { + return rc; + } + + /* Erase the GC'ed sector when needed */ + rc = zms_flash_erase_sector(fs, sec_addr); + if (rc) { + return rc; + } + +#ifdef CONFIG_ZMS_LOOKUP_CACHE + zms_lookup_cache_invalidate(fs, sec_addr >> ADDR_SECT_SHIFT); +#endif + rc = zms_add_empty_ate(fs, sec_addr); + + return rc; +} + +int zms_clear(struct zms_fs *fs) +{ + int rc; + uint64_t addr; + + if (!fs->ready) { + LOG_ERR("zms not initialized"); + return -EACCES; + } + + k_mutex_lock(&fs->zms_lock, K_FOREVER); + for (uint32_t i = 0; i < fs->sector_count; i++) { + addr = (uint64_t)i << ADDR_SECT_SHIFT; + rc = zms_flash_erase_sector(fs, addr); + if (rc) { + goto end; + } + rc = zms_add_empty_ate(fs, addr); + if (rc) { + goto end; + } + } + + /* zms needs to be reinitialized after clearing */ + fs->ready = false; + +end: + k_mutex_unlock(&fs->zms_lock); + + return 0; +} + +static int zms_init(struct zms_fs *fs) +{ + int rc, sec_closed; + struct zms_ate last_ate, first_ate, close_ate, empty_ate; + /* Initialize addr to 0 for the case fs->sector_count == 0. This + * should never happen as this is verified in zms_mount() but both + * Coverity and GCC believe the contrary. + */ + uint64_t addr = 0U, data_wra = 0U; + uint32_t i, closed_sectors = 0; + bool zms_magic_exist = false; + + k_mutex_lock(&fs->zms_lock, K_FOREVER); + + /* step through the sectors to find a open sector following + * a closed sector, this is where zms can write. + */ + + for (i = 0; i < fs->sector_count; i++) { + addr = zms_close_ate_addr(fs, ((uint64_t)i << ADDR_SECT_SHIFT)); + + /* verify if the sector is closed */ + sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate); + if (sec_closed < 0) { + rc = sec_closed; + goto end; + } + /* update cycle count */ + fs->sector_cycle = empty_ate.cycle_cnt; + + if (sec_closed == 1) { + /* closed sector */ + closed_sectors++; + /* Let's verify that this is a ZMS storage system */ + if (ZMS_GET_MAGIC_NUMBER(empty_ate.metadata) == ZMS_MAGIC_NUMBER) { + zms_magic_exist = true; + /* Let's check that we support this ZMS version */ + if (ZMS_GET_VERSION(empty_ate.metadata) != ZMS_DEFAULT_VERSION) { + LOG_ERR("ZMS Version is not supported"); + rc = -ENOEXEC; + goto end; + } + } + + zms_sector_advance(fs, &addr); + /* addr is pointing to the close ATE */ + /* verify if the sector is Open */ + sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate); + if (sec_closed < 0) { + rc = sec_closed; + goto end; + } + /* update cycle count */ + fs->sector_cycle = empty_ate.cycle_cnt; + + if (!sec_closed) { + /* We found an Open sector following a closed one */ + break; + } + } + } + /* all sectors are closed, and zms magic number not found. 
This is not a zms fs */ + if ((closed_sectors == fs->sector_count) && !zms_magic_exist) { + rc = -EDEADLK; + goto end; + } + /* TODO: add a recovery mechanism here if the ZMS magic number exist but all + * sectors are closed + */ + + if (i == fs->sector_count) { + /* none of the sectors were closed, which means that the first + * sector is the one in use, except if there are only 2 sectors. + * Let's check if the last sector has valid ATEs otherwise set + * the open sector to the first one. + */ + rc = zms_flash_ate_rd(fs, addr - fs->ate_size, &first_ate); + if (rc) { + goto end; + } + if (!zms_ate_valid(fs, &first_ate)) { + zms_sector_advance(fs, &addr); + } + rc = zms_get_sector_header(fs, addr, &empty_ate, &close_ate); + if (rc) { + goto end; + } + + if (zms_empty_ate_valid(fs, &empty_ate)) { + /* Empty ATE is valid, let's verify that this is a ZMS storage system */ + if (ZMS_GET_MAGIC_NUMBER(empty_ate.metadata) == ZMS_MAGIC_NUMBER) { + zms_magic_exist = true; + /* Let's check the version */ + if (ZMS_GET_VERSION(empty_ate.metadata) != ZMS_DEFAULT_VERSION) { + LOG_ERR("ZMS Version is not supported"); + rc = -ENOEXEC; + goto end; + } + } + } else { + rc = zms_flash_erase_sector(fs, addr); + if (rc) { + goto end; + } + rc = zms_add_empty_ate(fs, addr); + if (rc) { + goto end; + } + } + rc = zms_get_sector_cycle(fs, addr, &fs->sector_cycle); + if (rc == -ENOENT) { + /* sector never used */ + fs->sector_cycle = 0; + } else if (rc) { + /* bad flash read */ + goto end; + } + } + + /* addr contains address of closing ate in the most recent sector, + * search for the last valid ate using the recover_last_ate routine + * and also update the data_wra + */ + rc = zms_recover_last_ate(fs, &addr, &data_wra); + if (rc) { + goto end; + } + + /* addr contains address of the last valid ate in the most recent sector + * data_wra contains the data write address of the current sector + */ + fs->ate_wra = addr; + fs->data_wra = data_wra; + + /* fs->ate_wra should point to the next available entry. This is normally + * the next position after the one found by the recovery function. + * Let's verify that it doesn't contain any valid ATE, otherwise search for + * an empty position + */ + while (fs->ate_wra >= fs->data_wra) { + rc = zms_flash_ate_rd(fs, fs->ate_wra, &last_ate); + if (rc) { + goto end; + } + if (!zms_ate_valid(fs, &last_ate)) { + /* found empty location */ + break; + } + + /* ate on the last position within the sector is + * reserved for deletion an entry + */ + if ((fs->ate_wra == fs->data_wra) && last_ate.len) { + /* not a delete ate */ + rc = -ESPIPE; + goto end; + } + + fs->ate_wra -= fs->ate_size; + } + + /* The sector after the write sector is either empty with a valid empty ATE (regular case) + * or it has never been used or it is a closed sector (GC didn't finish) + * If it is a closed sector we must look for a valid GC done ATE in the current write + * sector, if it is missing, we need to restart gc because it has been interrupted. + * If no valid empty ATE is found then it has never been used. Just erase it by adding + * a valid empty ATE. + * When gc needs to be restarted, first erase the sector by adding an empty + * ATE otherwise the data might not fit into the sector. 
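+ * A valid GC done ATE in the write sector means the interrupted cycle had
+ * already finished moving its data, so only the next sector still needs
+ * to be erased before use.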
+ */ + addr = zms_close_ate_addr(fs, fs->ate_wra); + zms_sector_advance(fs, &addr); + + /* verify if the sector is closed */ + sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate); + if (sec_closed < 0) { + rc = sec_closed; + goto end; + } + + if (sec_closed == 1) { + /* The sector after fs->ate_wrt is closed. + * Look for a marker (gc_done_ate) that indicates that gc was finished. + */ + bool gc_done_marker = false; + struct zms_ate gc_done_ate; + + fs->sector_cycle = empty_ate.cycle_cnt; + addr = fs->ate_wra + fs->ate_size; + while (SECTOR_OFFSET(addr) < (fs->sector_size - 2 * fs->ate_size)) { + rc = zms_flash_ate_rd(fs, addr, &gc_done_ate); + if (rc) { + goto end; + } + + if (zms_gc_done_ate_valid(fs, &gc_done_ate)) { + break; + } + addr += fs->ate_size; + } + + if (gc_done_marker) { + /* erase the next sector */ + LOG_INF("GC Done marker found"); + addr = fs->ate_wra & ADDR_SECT_MASK; + zms_sector_advance(fs, &addr); + rc = zms_flash_erase_sector(fs, addr); + if (rc < 0) { + goto end; + } + rc = zms_add_empty_ate(fs, addr); + goto end; + } + LOG_INF("No GC Done marker found: restarting gc"); + rc = zms_flash_erase_sector(fs, fs->ate_wra); + if (rc) { + goto end; + } + rc = zms_add_empty_ate(fs, fs->ate_wra); + if (rc) { + goto end; + } + + /* Let's point to the first writable position */ + fs->ate_wra &= ADDR_SECT_MASK; + fs->ate_wra += (fs->sector_size - 3 * fs->ate_size); + fs->data_wra = (fs->ate_wra & ADDR_SECT_MASK); +#ifdef CONFIG_ZMS_LOOKUP_CACHE + /** + * At this point, the lookup cache wasn't built but the gc function need to use it. + * So, temporarily, we set the lookup cache to the end of the fs. + * The cache will be rebuilt afterwards + **/ + for (i = 0; i < CONFIG_ZMS_LOOKUP_CACHE_SIZE; i++) { + fs->lookup_cache[i] = fs->ate_wra; + } +#endif + rc = zms_gc(fs); + goto end; + } + +end: +#ifdef CONFIG_ZMS_LOOKUP_CACHE + if (!rc) { + rc = zms_lookup_cache_rebuild(fs); + } +#endif + /* If the sector is empty add a gc done ate to avoid having insufficient + * space when doing gc. 
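+ * (an ate_wra offset of sector_size - 3 * ate_size means that only the
+ * two header ATEs exist, i.e. the write sector is freshly erased)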
+ */ + if ((!rc) && (SECTOR_OFFSET(fs->ate_wra) == (fs->sector_size - 3 * fs->ate_size))) { + rc = zms_add_gc_done_ate(fs); + } + k_mutex_unlock(&fs->zms_lock); + + return rc; +} + +int zms_mount(struct zms_fs *fs) +{ + + int rc; + struct flash_pages_info info; + size_t write_block_size; + + k_mutex_init(&fs->zms_lock); + + fs->flash_parameters = flash_get_parameters(fs->flash_device); + if (fs->flash_parameters == NULL) { + LOG_ERR("Could not obtain flash parameters"); + return -EINVAL; + } + + fs->ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + write_block_size = flash_get_write_block_size(fs->flash_device); + + /* check that the write block size is supported */ + if (write_block_size > ZMS_BLOCK_SIZE || write_block_size == 0) { + LOG_ERR("Unsupported write block size"); + return -EINVAL; + } + + /* When the device need erase operations before write let's check that + * sector size is a multiple of pagesize + */ + if (flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT) { + rc = flash_get_page_info_by_offs(fs->flash_device, fs->offset, &info); + if (rc) { + LOG_ERR("Unable to get page info"); + return -EINVAL; + } + if (!fs->sector_size || fs->sector_size % info.size) { + LOG_ERR("Invalid sector size"); + return -EINVAL; + } + } + + /* we need at least 5 aligned ATEs size as the minimum sector size + * 1 close ATE, 1 empty ATE, 1 GC done ATE, 1 Delete ATE, 1 ID/Value ATE + */ + if (fs->sector_size < ZMS_MIN_ATE_NUM * fs->ate_size) { + LOG_ERR("Invalid sector size, should be at least %u", + ZMS_MIN_ATE_NUM * fs->ate_size); + } + + /* check the number of sectors, it should be at least 2 */ + if (fs->sector_count < 2) { + LOG_ERR("Configuration error - sector count below minimum requirement (2)"); + return -EINVAL; + } + + rc = zms_init(fs); + + if (rc) { + return rc; + } + + /* zms is ready for use */ + fs->ready = true; + + LOG_INF("%u Sectors of %u bytes", fs->sector_count, fs->sector_size); + LOG_INF("alloc wra: %llu, %llx", SECTOR_NUM(fs->ate_wra), SECTOR_OFFSET(fs->ate_wra)); + LOG_INF("data wra: %llu, %llx", SECTOR_NUM(fs->data_wra), SECTOR_OFFSET(fs->data_wra)); + + return 0; +} + +ssize_t zms_write(struct zms_fs *fs, uint32_t id, const void *data, size_t len) +{ + int rc; + size_t data_size; + struct zms_ate wlk_ate; + uint64_t wlk_addr, rd_addr; + uint32_t gc_count, required_space = 0U; /* no space, appropriate for delete ate */ + int prev_found = 0; + + if (!fs->ready) { + LOG_ERR("zms not initialized"); + return -EACCES; + } + + data_size = zms_al_size(fs, len); + + /* The maximum data size is sector size - 5 ate + * where: 1 ate for data, 1 ate for sector close, 1 ate for empty, + * 1 ate for gc done, and 1 ate to always allow a delete. 
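+ * For example, with 1024-byte sectors and 16-byte ATEs the largest
+ * single entry is 1024 - (5 * 16) = 944 bytes.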
+ * We cannot also store more than 64 KB of data + */ + if ((len > (fs->sector_size - 5 * fs->ate_size)) || (len > UINT16_MAX) || + ((len > 0) && (data == NULL))) { + return -EINVAL; + } + + /* find latest entry with same id */ +#ifdef CONFIG_ZMS_LOOKUP_CACHE + wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)]; + + if (wlk_addr == ZMS_LOOKUP_CACHE_NO_ADDR) { + goto no_cached_entry; + } +#else + wlk_addr = fs->ate_wra; +#endif + rd_addr = wlk_addr; + + /* Search for a previous valid ATE with the same ID */ + prev_found = zms_find_ate_with_id(fs, id, wlk_addr, fs->ate_wra, &wlk_ate, &rd_addr); + if (prev_found < 0) { + return prev_found; + } + +#ifdef CONFIG_ZMS_LOOKUP_CACHE +no_cached_entry: +#endif + if (prev_found) { + /* previous entry found */ + if (len > ZMS_DATA_IN_ATE_SIZE) { + rd_addr &= ADDR_SECT_MASK; + rd_addr += wlk_ate.offset; + } + + if (len == 0) { + /* do not try to compare with empty data */ + if (wlk_ate.len == 0U) { + /* skip delete entry as it is already the + * last one + */ + return 0; + } + } else if (len == wlk_ate.len) { + /* do not try to compare if lengths are not equal */ + /* compare the data and if equal return 0 */ + if (len <= ZMS_DATA_IN_ATE_SIZE) { + rc = memcmp(&wlk_ate.data, data, len); + if (!rc) { + return 0; + } + } else { + rc = zms_flash_block_cmp(fs, rd_addr, data, len); + if (rc <= 0) { + return rc; + } + } + } + } else { + /* skip delete entry for non-existing entry */ + if (len == 0) { + return 0; + } + } + + /* calculate required space if the entry contains data */ + if (data_size) { + /* Leave space for delete ate */ + if (len > ZMS_DATA_IN_ATE_SIZE) { + required_space = data_size + fs->ate_size; + } else { + required_space = fs->ate_size; + } + } + + k_mutex_lock(&fs->zms_lock, K_FOREVER); + + gc_count = 0; + while (1) { + if (gc_count == fs->sector_count) { + /* gc'ed all sectors, no extra space will be created + * by extra gc. + */ + rc = -ENOSPC; + goto end; + } + + /* We need to make sure that we leave the ATE at address 0x0 of the sector + * empty (even for delete ATE). Otherwise, the fs->ate_wra will be decremented + * after this write by ate_size and it will underflow. + * So the first position of a sector (fs->ate_wra = 0x0) is forbidden for ATEs + * and the second position could be written only be a delete ATE. 
+ */ + if ((SECTOR_OFFSET(fs->ate_wra)) && + (fs->ate_wra >= (fs->data_wra + required_space)) && + (SECTOR_OFFSET(fs->ate_wra - fs->ate_size) || !len)) { + rc = zms_flash_write_entry(fs, id, data, len); + if (rc) { + goto end; + } + break; + } + rc = zms_sector_close(fs); + if (rc) { + LOG_ERR("Failed to close the sector, returned = %d", rc); + goto end; + } + rc = zms_gc(fs); + if (rc) { + LOG_ERR("Garbage collection failed, returned = %d", rc); + goto end; + } + gc_count++; + } + rc = len; +end: + k_mutex_unlock(&fs->zms_lock); + return rc; +} + +int zms_delete(struct zms_fs *fs, uint32_t id) +{ + return zms_write(fs, id, NULL, 0); +} + +ssize_t zms_read_hist(struct zms_fs *fs, uint32_t id, void *data, size_t len, uint32_t cnt) +{ + int rc, prev_found = 0; + uint64_t wlk_addr, rd_addr = 0, wlk_prev_addr = 0; + uint32_t cnt_his; + struct zms_ate wlk_ate; +#ifdef CONFIG_ZMS_DATA_CRC + uint32_t computed_data_crc; +#endif + + if (!fs->ready) { + LOG_ERR("zms not initialized"); + return -EACCES; + } + + cnt_his = 0U; + +#ifdef CONFIG_ZMS_LOOKUP_CACHE + wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)]; + + if (wlk_addr == ZMS_LOOKUP_CACHE_NO_ADDR) { + rc = -ENOENT; + goto err; + } +#else + wlk_addr = fs->ate_wra; +#endif + + while (cnt_his <= cnt) { + wlk_prev_addr = wlk_addr; + /* Search for a previous valid ATE with the same ID */ + prev_found = zms_find_ate_with_id(fs, id, wlk_addr, fs->ate_wra, &wlk_ate, + &wlk_prev_addr); + if (prev_found < 0) { + return prev_found; + } + if (prev_found) { + cnt_his++; + /* wlk_prev_addr contain the ATE address of the previous found ATE. */ + rd_addr = wlk_prev_addr; + /* + * compute the previous ATE address in case we need to start + * the research again. + */ + rc = zms_compute_prev_addr(fs, &wlk_prev_addr); + if (rc) { + return rc; + } + /* wlk_addr will be the start research address in the next loop */ + wlk_addr = wlk_prev_addr; + } else { + break; + } + } + + if (((!prev_found) || (wlk_ate.id != id)) || (wlk_ate.len == 0U) || (cnt_his < cnt)) { + return -ENOENT; + } + + if (wlk_ate.len <= ZMS_DATA_IN_ATE_SIZE) { + /* data is stored in the ATE */ + if (data) { + memcpy(data, &wlk_ate.data, MIN(len, wlk_ate.len)); + } + } else { + rd_addr &= ADDR_SECT_MASK; + rd_addr += wlk_ate.offset; + /* do not read or copy data if pointer is NULL */ + if (data) { + rc = zms_flash_rd(fs, rd_addr, data, MIN(len, wlk_ate.len)); + if (rc) { + goto err; + } + } +#ifdef CONFIG_ZMS_DATA_CRC + /* Do not compute CRC for partial reads as CRC won't match */ + if (len >= wlk_ate.len) { + computed_data_crc = crc32_ieee(data, wlk_ate.len); + if (computed_data_crc != wlk_ate.data_crc) { + LOG_ERR("Invalid data CRC: ATE_CRC=0x%08X, " + "computed_data_crc=0x%08X", + wlk_ate.data_crc, computed_data_crc); + return -EIO; + } + } +#endif + } + + return wlk_ate.len; + +err: + return rc; +} + +ssize_t zms_read(struct zms_fs *fs, uint32_t id, void *data, size_t len) +{ + int rc; + + rc = zms_read_hist(fs, id, data, len, 0); + if (rc < 0) { + return rc; + } + + /* returns the minimum between ATE data length and requested length */ + return MIN(rc, len); +} + +ssize_t zms_get_data_length(struct zms_fs *fs, uint32_t id) +{ + int rc; + + rc = zms_read_hist(fs, id, NULL, 0, 0); + + return rc; +} + +ssize_t zms_calc_free_space(struct zms_fs *fs) +{ + + int rc, previous_sector_num = ZMS_INVALID_SECTOR_NUM, prev_found = 0, sec_closed; + struct zms_ate step_ate, wlk_ate, empty_ate, close_ate; + uint64_t step_addr, wlk_addr, step_prev_addr, wlk_prev_addr, data_wra = 0U; + uint8_t 
current_cycle; + ssize_t free_space = 0; + + if (!fs->ready) { + LOG_ERR("zms not initialized"); + return -EACCES; + } + + /* + * There is always a closing ATE , an empty ATE, a GC_done ATE and a reserved ATE for + * deletion in each sector. + * And there is always one reserved Sector for garbage collection operations + */ + free_space = (fs->sector_count - 1) * (fs->sector_size - 4 * fs->ate_size); + + step_addr = fs->ate_wra; + + do { + step_prev_addr = step_addr; + rc = zms_prev_ate(fs, &step_addr, &step_ate); + if (rc) { + return rc; + } + + /* When changing the sector let's get the new cycle counter */ + rc = zms_get_cycle_on_sector_change(fs, step_prev_addr, previous_sector_num, + ¤t_cycle); + if (rc) { + return rc; + } + previous_sector_num = SECTOR_NUM(step_prev_addr); + + /* Invalid and deleted ATEs are free spaces. + * Header ATEs are already retrieved from free space + */ + if (!zms_ate_valid_different_sector(fs, &step_ate, current_cycle) || + (step_ate.id == ZMS_HEAD_ID) || (step_ate.len == 0)) { + continue; + } + + wlk_addr = step_addr; + /* Try to find if there is a previous valid ATE with same ID */ + prev_found = zms_find_ate_with_id(fs, step_ate.id, wlk_addr, step_addr, &wlk_ate, + &wlk_prev_addr); + if (prev_found < 0) { + return prev_found; + } + + /* If no previous ATE is found, then this is a valid ATE that cannot be + * Garbage Collected + */ + if (!prev_found || (wlk_prev_addr == step_prev_addr)) { + if (step_ate.len > ZMS_DATA_IN_ATE_SIZE) { + free_space -= zms_al_size(fs, step_ate.len); + } + free_space -= fs->ate_size; + } + } while (step_addr != fs->ate_wra); + + /* we must keep the sector_cycle before we start looking into special cases */ + current_cycle = fs->sector_cycle; + + /* Let's look now for special cases where some sectors have only ATEs with + * small data size. + */ + const uint32_t second_to_last_offset = (2 * fs->ate_size); + + for (uint32_t i = 0; i < fs->sector_count; i++) { + step_addr = zms_close_ate_addr(fs, ((uint64_t)i << ADDR_SECT_SHIFT)); + + /* verify if the sector is closed */ + sec_closed = zms_validate_closed_sector(fs, step_addr, &empty_ate, &close_ate); + if (sec_closed < 0) { + return sec_closed; + } + + /* If the sector is closed and its offset is pointing to a position less than the + * 3rd to last ATE position in a sector, it means that we need to leave the second + * to last ATE empty. 
+ */ + if ((sec_closed == 1) && (close_ate.offset <= second_to_last_offset)) { + free_space -= fs->ate_size; + } else if (!sec_closed) { + /* sector is open, let's recover the last ATE */ + fs->sector_cycle = empty_ate.cycle_cnt; + rc = zms_recover_last_ate(fs, &step_addr, &data_wra); + if (rc) { + return rc; + } + if (SECTOR_OFFSET(step_addr) <= second_to_last_offset) { + free_space -= fs->ate_size; + } + } + } + /* restore sector cycle */ + fs->sector_cycle = current_cycle; + + return free_space; +} + +size_t zms_sector_max_data_size(struct zms_fs *fs) +{ + if (!fs->ready) { + LOG_ERR("ZMS not initialized"); + return -EACCES; + } + + return fs->ate_wra - fs->data_wra - fs->ate_size; +} + +int zms_sector_use_next(struct zms_fs *fs) +{ + int ret; + + if (!fs->ready) { + LOG_ERR("ZMS not initialized"); + return -EACCES; + } + + k_mutex_lock(&fs->zms_lock, K_FOREVER); + + ret = zms_sector_close(fs); + if (ret != 0) { + goto end; + } + + ret = zms_gc(fs); + +end: + k_mutex_unlock(&fs->zms_lock); + return ret; +} diff --git a/subsys/fs/zms/zms_priv.h b/subsys/fs/zms/zms_priv.h new file mode 100644 index 00000000000000..6594048ea0f474 --- /dev/null +++ b/subsys/fs/zms/zms_priv.h @@ -0,0 +1,76 @@ +/* ZMS: Zephyr Memory Storage + * + * Copyright (c) 2024 BayLibre SAS + * + * SPDX-License-Identifier: Apache-2.0 + */ +#ifndef __ZMS_PRIV_H_ +#define __ZMS_PRIV_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * MASKS AND SHIFT FOR ADDRESSES + * an address in zms is an uint64_t where: + * high 4 bytes represent the sector number + * low 4 bytes represent the offset in a sector + */ +#define ADDR_SECT_MASK GENMASK64(63, 32) +#define ADDR_SECT_SHIFT 32 +#define ADDR_OFFS_MASK GENMASK64(31, 0) +#define SECTOR_NUM(x) FIELD_GET(ADDR_SECT_MASK, x) +#define SECTOR_OFFSET(x) FIELD_GET(ADDR_OFFS_MASK, x) + +#if defined(CONFIG_ZMS_CUSTOM_BLOCK_SIZE) +#define ZMS_BLOCK_SIZE CONFIG_ZMS_MAX_BLOCK_SIZE +#else +#define ZMS_BLOCK_SIZE 32 +#endif + +#define ZMS_LOOKUP_CACHE_NO_ADDR GENMASK64(63, 0) +#define ZMS_HEAD_ID GENMASK(31, 0) + +#define ZMS_VERSION_MASK GENMASK(7, 0) +#define ZMS_GET_VERSION(x) FIELD_GET(ZMS_VERSION_MASK, x) +#define ZMS_DEFAULT_VERSION 1 +#define ZMS_MAGIC_NUMBER 0x42 /* murmur3a hash of "ZMS" (MSB) */ +#define ZMS_MAGIC_NUMBER_MASK GENMASK(15, 8) +#define ZMS_GET_MAGIC_NUMBER(x) FIELD_GET(ZMS_MAGIC_NUMBER_MASK, x) +#define ZMS_MIN_ATE_NUM 5 + +#define ZMS_INVALID_SECTOR_NUM -1 +#define ZMS_DATA_IN_ATE_SIZE 8 + +struct zms_ate { + uint8_t crc8; /* crc8 check of the entry */ + uint8_t cycle_cnt; /* cycle counter for non erasable devices */ + uint32_t id; /* data id */ + uint16_t len; /* data len within sector */ + union { + uint8_t data[8]; /* used to store small size data */ + struct { + uint32_t offset; /* data offset within sector */ + union { + uint32_t data_crc; /* + * crc for data: The data CRC is checked only + * when the whole data of the element is read. + * The data CRC is not checked for a partial + * read, as it is computed for the complete + * set of data. + */ + uint32_t metadata; /* + * Used to store metadata information + * such as storage version. 
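+ * Bits 0-7 hold the ZMS version (ZMS_VERSION_MASK) and
+ * bits 8-15 the magic number (ZMS_MAGIC_NUMBER_MASK).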
+ */ + }; + }; + }; +} __packed; + +#ifdef __cplusplus +} +#endif + +#endif /* __ZMS_PRIV_H_ */ diff --git a/tests/subsys/fs/zms/CMakeLists.txt b/tests/subsys/fs/zms/CMakeLists.txt new file mode 100644 index 00000000000000..66d9c91a520d62 --- /dev/null +++ b/tests/subsys/fs/zms/CMakeLists.txt @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.20.0) +find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE}) +project(fs_zms) + +FILE(GLOB app_sources src/*.c) +target_sources(app PRIVATE ${app_sources}) +target_include_directories(app PRIVATE ${ZEPHYR_BASE}/subsys/fs/zms) diff --git a/tests/subsys/fs/zms/boards/native_sim.overlay b/tests/subsys/fs/zms/boards/native_sim.overlay new file mode 100644 index 00000000000000..0f330fd7b176e0 --- /dev/null +++ b/tests/subsys/fs/zms/boards/native_sim.overlay @@ -0,0 +1,9 @@ +/* + * Copyright (c) 2024 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +&flash0 { + erase-block-size = <0x400>; +}; diff --git a/tests/subsys/fs/zms/boards/qemu_x86_ev_0x00.overlay b/tests/subsys/fs/zms/boards/qemu_x86_ev_0x00.overlay new file mode 100644 index 00000000000000..ffc3bc97aacac2 --- /dev/null +++ b/tests/subsys/fs/zms/boards/qemu_x86_ev_0x00.overlay @@ -0,0 +1,9 @@ +/* + * Copyright (c) 2024 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +&sim_flash { + erase-value = < 0x00 >; +}; diff --git a/tests/subsys/fs/zms/prj.conf b/tests/subsys/fs/zms/prj.conf new file mode 100644 index 00000000000000..5d84f6e17fde49 --- /dev/null +++ b/tests/subsys/fs/zms/prj.conf @@ -0,0 +1,9 @@ +CONFIG_ZTEST=y +CONFIG_ZTEST_STACK_SIZE=4096 + +CONFIG_FLASH=y +CONFIG_FLASH_MAP=y + +CONFIG_ZMS=y +CONFIG_LOG=y +CONFIG_ZMS_LOG_LEVEL_DBG=y diff --git a/tests/subsys/fs/zms/src/main.c b/tests/subsys/fs/zms/src/main.c new file mode 100644 index 00000000000000..80866687dbab72 --- /dev/null +++ b/tests/subsys/fs/zms/src/main.c @@ -0,0 +1,888 @@ +/* + * Copyright (c) 2024 BayLibre SAS + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* + * This test is designed to be run using flash-simulator which provide + * functionality for flash property customization and emulating errors in + * flash operation in parallel to regular flash API. + * Test should be run on qemu_x86 or native_sim target. + */ + +#if !defined(CONFIG_BOARD_QEMU_X86) && !defined(CONFIG_ARCH_POSIX) +#error "Run only on qemu_x86 or a posix architecture based target (for ex. 
native_sim)" +#endif + +#include +#include +#include + +#include +#include +#include +#include +#include +#include "zms_priv.h" + +#define TEST_ZMS_AREA storage_partition +#define TEST_ZMS_AREA_OFFSET FIXED_PARTITION_OFFSET(TEST_ZMS_AREA) +#define TEST_ZMS_AREA_ID FIXED_PARTITION_ID(TEST_ZMS_AREA) +#define TEST_ZMS_AREA_DEV DEVICE_DT_GET(DT_MTD_FROM_FIXED_PARTITION(DT_NODELABEL(TEST_ZMS_AREA))) +#define TEST_DATA_ID 1 +#define TEST_SECTOR_COUNT 5U + +static const struct device *const flash_dev = TEST_ZMS_AREA_DEV; + +struct zms_fixture { + struct zms_fs fs; + struct stats_hdr *sim_stats; + struct stats_hdr *sim_thresholds; +}; + +static void *setup(void) +{ + int err; + const struct flash_area *fa; + struct flash_pages_info info; + static struct zms_fixture fixture; + + __ASSERT_NO_MSG(device_is_ready(flash_dev)); + + err = flash_area_open(TEST_ZMS_AREA_ID, &fa); + zassert_true(err == 0, "flash_area_open() fail: %d", err); + + fixture.fs.offset = TEST_ZMS_AREA_OFFSET; + err = flash_get_page_info_by_offs(flash_area_get_device(fa), fixture.fs.offset, &info); + zassert_true(err == 0, "Unable to get page info: %d", err); + + fixture.fs.sector_size = info.size; + fixture.fs.sector_count = TEST_SECTOR_COUNT; + fixture.fs.flash_device = flash_area_get_device(fa); + + return &fixture; +} + +static void before(void *data) +{ + struct zms_fixture *fixture = (struct zms_fixture *)data; + + fixture->sim_stats = stats_group_find("flash_sim_stats"); + fixture->sim_thresholds = stats_group_find("flash_sim_thresholds"); +} + +static void after(void *data) +{ + struct zms_fixture *fixture = (struct zms_fixture *)data; + + if (fixture->sim_stats) { + stats_reset(fixture->sim_stats); + } + if (fixture->sim_thresholds) { + stats_reset(fixture->sim_thresholds); + } + + /* Clear ZMS */ + if (fixture->fs.ready) { + int err; + + err = zms_clear(&fixture->fs); + zassert_true(err == 0, "zms_clear call failure: %d", err); + } + + fixture->fs.sector_count = TEST_SECTOR_COUNT; +} + +ZTEST_SUITE(zms, NULL, setup, before, after, NULL); + +ZTEST_F(zms, test_zms_mount) +{ + int err; + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); +} + +static void execute_long_pattern_write(uint32_t id, struct zms_fs *fs) +{ + char rd_buf[512]; + char wr_buf[512]; + char pattern[] = {0xDE, 0xAD, 0xBE, 0xEF}; + size_t len; + + len = zms_read(fs, id, rd_buf, sizeof(rd_buf)); + zassert_true(len == -ENOENT, "zms_read unexpected failure: %d", len); + + BUILD_ASSERT((sizeof(wr_buf) % sizeof(pattern)) == 0); + for (int i = 0; i < sizeof(wr_buf); i += sizeof(pattern)) { + memcpy(wr_buf + i, pattern, sizeof(pattern)); + } + + len = zms_write(fs, id, wr_buf, sizeof(wr_buf)); + zassert_true(len == sizeof(wr_buf), "zms_write failed: %d", len); + + len = zms_read(fs, id, rd_buf, sizeof(rd_buf)); + zassert_true(len == sizeof(rd_buf), "zms_read unexpected failure: %d", len); + zassert_mem_equal(wr_buf, rd_buf, sizeof(rd_buf), "RD buff should be equal to the WR buff"); +} + +ZTEST_F(zms, test_zms_write) +{ + int err; + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + execute_long_pattern_write(TEST_DATA_ID, &fixture->fs); +} + +static int flash_sim_write_calls_find(struct stats_hdr *hdr, void *arg, const char *name, + uint16_t off) +{ + if (!strcmp(name, "flash_write_calls")) { + uint32_t **flash_write_stat = (uint32_t **)arg; + *flash_write_stat = (uint32_t *)((uint8_t *)hdr + off); + } + + return 0; +} + +static int flash_sim_max_write_calls_find(struct 
stats_hdr *hdr, void *arg, const char *name, + uint16_t off) +{ + if (!strcmp(name, "max_write_calls")) { + uint32_t **max_write_calls = (uint32_t **)arg; + *max_write_calls = (uint32_t *)((uint8_t *)hdr + off); + } + + return 0; +} + +ZTEST_F(zms, test_zms_corrupted_write) +{ + int err; + size_t len; + char rd_buf[512]; + char wr_buf_1[512]; + char wr_buf_2[512]; + char pattern_1[] = {0xDE, 0xAD, 0xBE, 0xEF}; + char pattern_2[] = {0x03, 0xAA, 0x85, 0x6F}; + uint32_t *flash_write_stat; + uint32_t *flash_max_write_calls; + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + err = zms_read(&fixture->fs, TEST_DATA_ID, rd_buf, sizeof(rd_buf)); + zassert_true(err == -ENOENT, "zms_read unexpected failure: %d", err); + + BUILD_ASSERT((sizeof(wr_buf_1) % sizeof(pattern_1)) == 0); + for (int i = 0; i < sizeof(wr_buf_1); i += sizeof(pattern_1)) { + memcpy(wr_buf_1 + i, pattern_1, sizeof(pattern_1)); + } + + len = zms_write(&fixture->fs, TEST_DATA_ID, wr_buf_1, sizeof(wr_buf_1)); + zassert_true(len == sizeof(wr_buf_1), "zms_write failed: %d", len); + + len = zms_read(&fixture->fs, TEST_DATA_ID, rd_buf, sizeof(rd_buf)); + zassert_true(len == sizeof(rd_buf), "zms_read unexpected failure: %d", len); + zassert_mem_equal(wr_buf_1, rd_buf, sizeof(rd_buf), + "RD buff should be equal to the first WR buff"); + + BUILD_ASSERT((sizeof(wr_buf_2) % sizeof(pattern_2)) == 0); + for (int i = 0; i < sizeof(wr_buf_2); i += sizeof(pattern_2)) { + memcpy(wr_buf_2 + i, pattern_2, sizeof(pattern_2)); + } + + /* Set the maximum number of writes that the flash simulator can + * execute. + */ + stats_walk(fixture->sim_thresholds, flash_sim_max_write_calls_find, &flash_max_write_calls); + stats_walk(fixture->sim_stats, flash_sim_write_calls_find, &flash_write_stat); + + *flash_max_write_calls = *flash_write_stat - 1; + *flash_write_stat = 0; + + /* Flash simulator will lose part of the data at the end of this write. + * This should simulate power down during flash write. The written data + * are corrupted at this point and should be discarded by the ZMS. + */ + len = zms_write(&fixture->fs, TEST_DATA_ID, wr_buf_2, sizeof(wr_buf_2)); + zassert_true(len == sizeof(wr_buf_2), "zms_write failed: %d", len); + + /* Reinitialize the ZMS. */ + memset(&fixture->fs, 0, sizeof(fixture->fs)); + (void)setup(); + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + len = zms_read(&fixture->fs, TEST_DATA_ID, rd_buf, sizeof(rd_buf)); + zassert_true(len == sizeof(rd_buf), "zms_read unexpected failure: %d", len); + zassert_true(memcmp(wr_buf_2, rd_buf, sizeof(rd_buf)) != 0, + "RD buff should not be equal to the second WR buff because of " + "corrupted write operation"); + zassert_mem_equal(wr_buf_1, rd_buf, sizeof(rd_buf), + "RD buff should be equal to the first WR buff because subsequent " + "write operation has failed"); +} + +ZTEST_F(zms, test_zms_gc) +{ + int err; + int len; + uint8_t buf[32]; + uint8_t rd_buf[32]; + + const uint16_t max_id = 10; + /* 21st write will trigger GC. 
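+ * (e.g. with 1024-byte sectors, each write costs a 16-byte ATE plus
+ * 32 bytes of data, so 20 writes fit once the header ATEs are reserved)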
*/ + const uint16_t max_writes = 21; + + fixture->fs.sector_count = 2; + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + for (uint32_t i = 0; i < max_writes; i++) { + uint8_t id = (i % max_id); + uint8_t id_data = id + max_id * (i / max_id); + + memset(buf, id_data, sizeof(buf)); + + len = zms_write(&fixture->fs, id, buf, sizeof(buf)); + zassert_true(len == sizeof(buf), "zms_write failed: %d", len); + } + + for (uint32_t id = 0; id < max_id; id++) { + len = zms_read(&fixture->fs, id, rd_buf, sizeof(buf)); + zassert_true(len == sizeof(rd_buf), "zms_read unexpected failure: %d", len); + + for (uint16_t i = 0; i < sizeof(rd_buf); i++) { + rd_buf[i] = rd_buf[i] % max_id; + buf[i] = id; + } + zassert_mem_equal(buf, rd_buf, sizeof(rd_buf), + "RD buff should be equal to the WR buff"); + } + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + for (uint32_t id = 0; id < max_id; id++) { + len = zms_read(&fixture->fs, id, rd_buf, sizeof(buf)); + zassert_true(len == sizeof(rd_buf), "zms_read unexpected failure: %d", len); + + for (uint16_t i = 0; i < sizeof(rd_buf); i++) { + rd_buf[i] = rd_buf[i] % max_id; + buf[i] = id; + } + zassert_mem_equal(buf, rd_buf, sizeof(rd_buf), + "RD buff should be equal to the WR buff"); + } +} + +static void write_content(uint32_t max_id, uint32_t begin, uint32_t end, struct zms_fs *fs) +{ + uint8_t buf[32]; + ssize_t len; + + for (uint32_t i = begin; i < end; i++) { + uint8_t id = (i % max_id); + uint8_t id_data = id + max_id * (i / max_id); + + memset(buf, id_data, sizeof(buf)); + + len = zms_write(fs, id, buf, sizeof(buf)); + zassert_true(len == sizeof(buf), "zms_write failed: %d", len); + } +} + +static void check_content(uint32_t max_id, struct zms_fs *fs) +{ + uint8_t rd_buf[32]; + uint8_t buf[32]; + ssize_t len; + + for (uint32_t id = 0; id < max_id; id++) { + len = zms_read(fs, id, rd_buf, sizeof(buf)); + zassert_true(len == sizeof(rd_buf), "zms_read unexpected failure: %d", len); + + for (uint16_t i = 0; i < ARRAY_SIZE(rd_buf); i++) { + rd_buf[i] = rd_buf[i] % max_id; + buf[i] = id; + } + zassert_mem_equal(buf, rd_buf, sizeof(rd_buf), + "RD buff should be equal to the WR buff"); + } +} + +/** + * Full round of GC over 3 sectors + */ +ZTEST_F(zms, test_zms_gc_3sectors) +{ + int err; + + const uint16_t max_id = 10; + /* 41st write will trigger 1st GC. */ + const uint16_t max_writes = 41; + /* 61st write will trigger 2nd GC. */ + const uint16_t max_writes_2 = 41 + 20; + /* 81st write will trigger 3rd GC. */ + const uint16_t max_writes_3 = 41 + 20 + 20; + /* 101st write will trigger 4th GC. 
*/ + const uint16_t max_writes_4 = 41 + 20 + 20 + 20; + + fixture->fs.sector_count = 3; + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + zassert_equal(fixture->fs.ate_wra >> ADDR_SECT_SHIFT, 0, "unexpected write sector"); + + /* Trigger 1st GC */ + write_content(max_id, 0, max_writes, &fixture->fs); + + /* sector sequence: empty,closed, write */ + zassert_equal(fixture->fs.ate_wra >> ADDR_SECT_SHIFT, 2, "unexpected write sector"); + check_content(max_id, &fixture->fs); + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + zassert_equal(fixture->fs.ate_wra >> ADDR_SECT_SHIFT, 2, "unexpected write sector"); + check_content(max_id, &fixture->fs); + + /* Trigger 2nd GC */ + write_content(max_id, max_writes, max_writes_2, &fixture->fs); + + /* sector sequence: write, empty, closed */ + zassert_equal(fixture->fs.ate_wra >> ADDR_SECT_SHIFT, 0, "unexpected write sector"); + check_content(max_id, &fixture->fs); + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + zassert_equal(fixture->fs.ate_wra >> ADDR_SECT_SHIFT, 0, "unexpected write sector"); + check_content(max_id, &fixture->fs); + + /* Trigger 3rd GC */ + write_content(max_id, max_writes_2, max_writes_3, &fixture->fs); + + /* sector sequence: closed, write, empty */ + zassert_equal(fixture->fs.ate_wra >> ADDR_SECT_SHIFT, 1, "unexpected write sector"); + check_content(max_id, &fixture->fs); + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + zassert_equal(fixture->fs.ate_wra >> ADDR_SECT_SHIFT, 1, "unexpected write sector"); + check_content(max_id, &fixture->fs); + + /* Trigger 4th GC */ + write_content(max_id, max_writes_3, max_writes_4, &fixture->fs); + + /* sector sequence: empty,closed, write */ + zassert_equal(fixture->fs.ate_wra >> ADDR_SECT_SHIFT, 2, "unexpected write sector"); + check_content(max_id, &fixture->fs); + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + zassert_equal(fixture->fs.ate_wra >> ADDR_SECT_SHIFT, 2, "unexpected write sector"); + check_content(max_id, &fixture->fs); +} + +static int flash_sim_max_len_find(struct stats_hdr *hdr, void *arg, const char *name, uint16_t off) +{ + if (!strcmp(name, "max_len")) { + uint32_t **max_len = (uint32_t **)arg; + *max_len = (uint32_t *)((uint8_t *)hdr + off); + } + + return 0; +} + +ZTEST_F(zms, test_zms_corrupted_sector_close_operation) +{ + int err; + int len; + uint8_t buf[32]; + uint32_t *flash_write_stat; + uint32_t *flash_max_write_calls; + uint32_t *flash_max_len; + + const uint16_t max_id = 10; + /* 21st write will trigger GC. */ + const uint16_t max_writes = 21; + + /* Get the address of simulator parameters. */ + stats_walk(fixture->sim_thresholds, flash_sim_max_write_calls_find, &flash_max_write_calls); + stats_walk(fixture->sim_thresholds, flash_sim_max_len_find, &flash_max_len); + stats_walk(fixture->sim_stats, flash_sim_write_calls_find, &flash_write_stat); + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + for (uint32_t i = 0; i < max_writes; i++) { + uint8_t id = (i % max_id); + uint8_t id_data = id + max_id * (i / max_id); + + memset(buf, id_data, sizeof(buf)); + + if (i == max_writes - 1) { + /* Reset stats. */ + *flash_write_stat = 0; + + /* Block write calls and simulate power down during + * sector closing operation, so only a part of a ZMS + * closing ate will be written. 
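+ * (the simulator is limited to a single write of at most 4 bytes, which
+ * truncates the close ATE)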
+ */ + *flash_max_write_calls = 1; + *flash_max_len = 4; + } + len = zms_write(&fixture->fs, id, buf, sizeof(buf)); + zassert_true(len == sizeof(buf), "zms_write failed: %d", len); + } + + /* Make the flash simulator functional again. */ + *flash_max_write_calls = 0; + *flash_max_len = 0; + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + check_content(max_id, &fixture->fs); + + /* Ensure that the ZMS is able to store new content. */ + execute_long_pattern_write(max_id, &fixture->fs); +} + +/** + * @brief Test case when storage become full, so only deletion is possible. + */ +ZTEST_F(zms, test_zms_full_sector) +{ + int err; + ssize_t len; + uint32_t filling_id = 0; + uint32_t i, data_read; + + fixture->fs.sector_count = 3; + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + while (1) { + len = zms_write(&fixture->fs, filling_id, &filling_id, sizeof(filling_id)); + if (len == -ENOSPC) { + break; + } + zassert_true(len == sizeof(filling_id), "zms_write failed: %d", len); + filling_id++; + } + + /* check whether can delete whatever from full storage */ + err = zms_delete(&fixture->fs, 1); + zassert_true(err == 0, "zms_delete call failure: %d", err); + + /* the last sector is full now, test re-initialization */ + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + len = zms_write(&fixture->fs, filling_id, &filling_id, sizeof(filling_id)); + zassert_true(len == sizeof(filling_id), "zms_write failed: %d", len); + + /* sanitycheck on ZMS content */ + for (i = 0; i <= filling_id; i++) { + len = zms_read(&fixture->fs, i, &data_read, sizeof(data_read)); + if (i == 1) { + zassert_true(len == -ENOENT, "zms_read shouldn't found the entry: %d", len); + } else { + zassert_true(len == sizeof(data_read), + "zms_read #%d failed: len is %zd instead of %zu", i, len, + sizeof(data_read)); + zassert_equal(data_read, i, "read unexpected data: %d instead of %d", + data_read, i); + } + } +} + +ZTEST_F(zms, test_delete) +{ + int err; + ssize_t len; + uint32_t filling_id, data_read; + uint32_t ate_wra, data_wra; + + fixture->fs.sector_count = 3; + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + for (filling_id = 0; filling_id < 10; filling_id++) { + len = zms_write(&fixture->fs, filling_id, &filling_id, sizeof(filling_id)); + + zassert_true(len == sizeof(filling_id), "zms_write failed: %d", len); + + if (filling_id != 0) { + continue; + } + + /* delete the first entry while it is the most recent one */ + err = zms_delete(&fixture->fs, filling_id); + zassert_true(err == 0, "zms_delete call failure: %d", err); + + len = zms_read(&fixture->fs, filling_id, &data_read, sizeof(data_read)); + zassert_true(len == -ENOENT, "zms_read shouldn't found the entry: %d", len); + } + + /* delete existing entry */ + err = zms_delete(&fixture->fs, 1); + zassert_true(err == 0, "zms_delete call failure: %d", err); + + len = zms_read(&fixture->fs, 1, &data_read, sizeof(data_read)); + zassert_true(len == -ENOENT, "zms_read shouldn't found the entry: %d", len); + + ate_wra = fixture->fs.ate_wra; + data_wra = fixture->fs.data_wra; + + /* delete already deleted entry */ + err = zms_delete(&fixture->fs, 1); + zassert_true(err == 0, "zms_delete call failure: %d", err); + zassert_true(ate_wra == fixture->fs.ate_wra && data_wra == fixture->fs.data_wra, + "delete already deleted entry should not make" + " any footprint in the storage"); + + /* delete 
nonexisting entry */ + err = zms_delete(&fixture->fs, filling_id); + zassert_true(err == 0, "zms_delete call failure: %d", err); + zassert_true(ate_wra == fixture->fs.ate_wra && data_wra == fixture->fs.data_wra, + "delete nonexistent entry should not make" + " any footprint in the storage"); +} + +/* + * Test that garbage-collection can recover all ate's even when the last ate, + * ie close_ate, is corrupt. In this test the close_ate is set to point to the + * last ate at -5. A valid ate is however present at -6. Since the close_ate + * has an invalid crc8, the offset should not be used and a recover of the + * last ate should be done instead. + */ +ZTEST_F(zms, test_zms_gc_corrupt_close_ate) +{ + struct zms_ate ate, close_ate, empty_ate; + uint32_t data; + ssize_t len; + int err; + + Z_TEST_SKIP_IFNDEF(CONFIG_FLASH_SIMULATOR_DOUBLE_WRITES); + close_ate.id = 0xffffffff; + close_ate.offset = fixture->fs.sector_size - sizeof(struct zms_ate) * 5; + close_ate.len = 0; + close_ate.metadata = 0xffffffff; + close_ate.cycle_cnt = 1; + close_ate.crc8 = 0xff; /* Incorrect crc8 */ + + empty_ate.id = 0xffffffff; + empty_ate.offset = 0; + empty_ate.len = 0xffff; + empty_ate.metadata = 0x4201; + empty_ate.cycle_cnt = 1; + empty_ate.crc8 = + crc8_ccitt(0xff, (uint8_t *)&empty_ate + SIZEOF_FIELD(struct zms_ate, crc8), + sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8)); + + memset(&ate, 0, sizeof(struct zms_ate)); + ate.id = 0x1; + ate.len = sizeof(data); + ate.cycle_cnt = 1; + data = 0xaa55aa55; + memcpy(&ate.data, &data, sizeof(data)); + ate.crc8 = crc8_ccitt(0xff, (uint8_t *)&ate + SIZEOF_FIELD(struct zms_ate, crc8), + sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8)); + + /* Add empty ATE */ + err = flash_write(fixture->fs.flash_device, + fixture->fs.offset + fixture->fs.sector_size - sizeof(struct zms_ate), + &empty_ate, sizeof(empty_ate)); + zassert_true(err == 0, "flash_write failed: %d", err); + + /* Mark sector 0 as closed */ + err = flash_write(fixture->fs.flash_device, + fixture->fs.offset + fixture->fs.sector_size - 2 * sizeof(struct zms_ate), + &close_ate, sizeof(close_ate)); + zassert_true(err == 0, "flash_write failed: %d", err); + + /* Write valid ate at -6 */ + err = flash_write(fixture->fs.flash_device, + fixture->fs.offset + fixture->fs.sector_size - 6 * sizeof(struct zms_ate), + &ate, sizeof(ate)); + zassert_true(err == 0, "flash_write failed: %d", err); + + /* Mark sector 1 as closed */ + err = flash_write(fixture->fs.flash_device, + fixture->fs.offset + (2 * fixture->fs.sector_size) - + 2 * sizeof(struct zms_ate), + &close_ate, sizeof(close_ate)); + zassert_true(err == 0, "flash_write failed: %d", err); + + fixture->fs.sector_count = 3; + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + data = 0; + len = zms_read(&fixture->fs, 1, &data, sizeof(data)); + zassert_true(len == sizeof(data), "zms_read should have read %d bytes", sizeof(data)); + zassert_true(data == 0xaa55aa55, "unexpected value %d", data); +} + +/* + * Test that garbage-collection correctly handles corrupt ate's. 
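+ * Here the close ATE of sector 0 points at an ATE whose crc8 is invalid;
+ * zms_mount() must nevertheless succeed.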
+ */ +ZTEST_F(zms, test_zms_gc_corrupt_ate) +{ + struct zms_ate corrupt_ate, close_ate; + int err; + + close_ate.id = 0xffffffff; + close_ate.offset = fixture->fs.sector_size / 2; + close_ate.len = 0; + close_ate.crc8 = + crc8_ccitt(0xff, (uint8_t *)&close_ate + SIZEOF_FIELD(struct zms_ate, crc8), + sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8)); + + corrupt_ate.id = 0xdeadbeef; + corrupt_ate.offset = 0; + corrupt_ate.len = 20; + corrupt_ate.crc8 = 0xff; /* Incorrect crc8 */ + + /* Mark sector 0 as closed */ + err = flash_write(fixture->fs.flash_device, + fixture->fs.offset + fixture->fs.sector_size - 2 * sizeof(struct zms_ate), + &close_ate, sizeof(close_ate)); + zassert_true(err == 0, "flash_write failed: %d", err); + + /* Write a corrupt ate */ + err = flash_write(fixture->fs.flash_device, + fixture->fs.offset + (fixture->fs.sector_size / 2), &corrupt_ate, + sizeof(corrupt_ate)); + zassert_true(err == 0, "flash_write failed: %d", err); + + /* Mark sector 1 as closed */ + err = flash_write(fixture->fs.flash_device, + fixture->fs.offset + (2 * fixture->fs.sector_size) - + 2 * sizeof(struct zms_ate), + &close_ate, sizeof(close_ate)); + zassert_true(err == 0, "flash_write failed: %d", err); + + fixture->fs.sector_count = 3; + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); +} + +#ifdef CONFIG_ZMS_LOOKUP_CACHE +static size_t num_matching_cache_entries(uint64_t addr, bool compare_sector_only, struct zms_fs *fs) +{ + size_t i, num = 0; + uint64_t mask = compare_sector_only ? ADDR_SECT_MASK : UINT64_MAX; + + for (i = 0; i < CONFIG_ZMS_LOOKUP_CACHE_SIZE; i++) { + if ((fs->lookup_cache[i] & mask) == addr) { + num++; + } + } + + return num; +} + +static size_t num_occupied_cache_entries(struct zms_fs *fs) +{ + return CONFIG_ZMS_LOOKUP_CACHE_SIZE - + num_matching_cache_entries(ZMS_LOOKUP_CACHE_NO_ADDR, false, fs); +} +#endif + +/* + * Test that ZMS lookup cache is properly rebuilt on zms_mount(), or initialized + * to ZMS_LOOKUP_CACHE_NO_ADDR if the store is empty. + */ +ZTEST_F(zms, test_zms_cache_init) +{ +#ifdef CONFIG_ZMS_LOOKUP_CACHE + int err; + size_t num; + uint64_t ate_addr; + uint8_t data = 0; + + /* Test cache initialization when the store is empty */ + + fixture->fs.sector_count = 3; + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + num = num_occupied_cache_entries(&fixture->fs); + zassert_equal(num, 0, "uninitialized cache"); + + /* Test cache update after zms_write() */ + + ate_addr = fixture->fs.ate_wra; + err = zms_write(&fixture->fs, 1, &data, sizeof(data)); + zassert_equal(err, sizeof(data), "zms_write call failure: %d", err); + + num = num_occupied_cache_entries(&fixture->fs); + zassert_equal(num, 1, "cache not updated after write"); + + num = num_matching_cache_entries(ate_addr, false, &fixture->fs); + zassert_equal(num, 1, "invalid cache entry after write"); + + /* Test cache initialization when the store is non-empty */ + + memset(fixture->fs.lookup_cache, 0xAA, sizeof(fixture->fs.lookup_cache)); + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + num = num_occupied_cache_entries(&fixture->fs); + zassert_equal(num, 1, "uninitialized cache after restart"); + + num = num_matching_cache_entries(ate_addr, false, &fixture->fs); + zassert_equal(num, 1, "invalid cache entry after restart"); +#endif +} + +/* + * Test that even after writing more ZMS IDs than the number of ZMS lookup cache + * entries they all can be read correctly. 
+ */ +ZTEST_F(zms, test_zms_cache_collission) +{ +#ifdef CONFIG_ZMS_LOOKUP_CACHE + int err; + uint32_t id; + uint16_t data; + + fixture->fs.sector_count = 4; + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + for (id = 0; id < CONFIG_ZMS_LOOKUP_CACHE_SIZE + 1; id++) { + data = id; + err = zms_write(&fixture->fs, id, &data, sizeof(data)); + zassert_equal(err, sizeof(data), "zms_write call failure: %d", err); + } + + for (id = 0; id < CONFIG_ZMS_LOOKUP_CACHE_SIZE + 1; id++) { + err = zms_read(&fixture->fs, id, &data, sizeof(data)); + zassert_equal(err, sizeof(data), "zms_read call failure: %d", err); + zassert_equal(data, id, "incorrect data read"); + } +#endif +} + +/* + * Test that ZMS lookup cache does not contain any address from gc-ed sector + */ +ZTEST_F(zms, test_zms_cache_gc) +{ +#ifdef CONFIG_ZMS_LOOKUP_CACHE + int err; + size_t num; + uint16_t data = 0; + + fixture->fs.sector_count = 3; + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + /* Fill the first sector with writes of ID 1 */ + + while (fixture->fs.data_wra + sizeof(data) + sizeof(struct zms_ate) <= + fixture->fs.ate_wra) { + ++data; + err = zms_write(&fixture->fs, 1, &data, sizeof(data)); + zassert_equal(err, sizeof(data), "zms_write call failure: %d", err); + } + + /* Verify that cache contains a single entry for sector 0 */ + + num = num_matching_cache_entries(0ULL << ADDR_SECT_SHIFT, true, &fixture->fs); + zassert_equal(num, 1, "invalid cache content after filling sector 0"); + + /* Fill the second sector with writes of ID 2 */ + + while ((fixture->fs.ate_wra >> ADDR_SECT_SHIFT) != 2) { + ++data; + err = zms_write(&fixture->fs, 2, &data, sizeof(data)); + zassert_equal(err, sizeof(data), "zms_write call failure: %d", err); + } + + /* + * At this point sector 0 should have been gc-ed. Verify that action is + * reflected by the cache content. + */ + + num = num_matching_cache_entries(0ULL << ADDR_SECT_SHIFT, true, &fixture->fs); + zassert_equal(num, 0, "not invalidated cache entries aftetr gc"); + + num = num_matching_cache_entries(2ULL << ADDR_SECT_SHIFT, true, &fixture->fs); + zassert_equal(num, 2, "invalid cache content after gc"); +#endif +} + +/* + * Test ZMS lookup cache hash quality. + */ +ZTEST_F(zms, test_zms_cache_hash_quality) +{ +#ifdef CONFIG_ZMS_LOOKUP_CACHE + const size_t MIN_CACHE_OCCUPANCY = CONFIG_ZMS_LOOKUP_CACHE_SIZE * 6 / 10; + int err; + size_t num; + uint32_t id; + uint16_t data; + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + /* Write ZMS IDs from 0 to CONFIG_ZMS_LOOKUP_CACHE_SIZE - 1 */ + + for (uint16_t i = 0; i < CONFIG_ZMS_LOOKUP_CACHE_SIZE; i++) { + id = i; + data = 0; + + err = zms_write(&fixture->fs, id, &data, sizeof(data)); + zassert_equal(err, sizeof(data), "zms_write call failure: %d", err); + } + + /* Verify that at least 60% cache entries are occupied */ + + num = num_occupied_cache_entries(&fixture->fs); + TC_PRINT("Cache occupancy: %u\n", (unsigned int)num); + zassert_between_inclusive(num, MIN_CACHE_OCCUPANCY, CONFIG_ZMS_LOOKUP_CACHE_SIZE, + "too low cache occupancy - poor hash quality"); + + err = zms_clear(&fixture->fs); + zassert_true(err == 0, "zms_clear call failure: %d", err); + + err = zms_mount(&fixture->fs); + zassert_true(err == 0, "zms_mount call failure: %d", err); + + /* Write CONFIG_ZMS_LOOKUP_CACHE_SIZE ZMS IDs that form the following series: 0, 4, 8... 
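+ * A strided series like this would collapse onto a few cache slots under
+ * a weak hash (e.g. a plain modulo), so the occupancy check below catches
+ * poor bit mixing.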
*/ + + for (uint16_t i = 0; i < CONFIG_ZMS_LOOKUP_CACHE_SIZE; i++) { + id = i * 4; + data = 0; + + err = zms_write(&fixture->fs, id, &data, sizeof(data)); + zassert_equal(err, sizeof(data), "zms_write call failure: %d", err); + } + + /* Verify that at least 60% cache entries are occupied */ + + num = num_occupied_cache_entries(&fixture->fs); + TC_PRINT("Cache occupancy: %u\n", (unsigned int)num); + zassert_between_inclusive(num, MIN_CACHE_OCCUPANCY, CONFIG_ZMS_LOOKUP_CACHE_SIZE, + "too low cache occupancy - poor hash quality"); + +#endif +} diff --git a/tests/subsys/fs/zms/testcase.yaml b/tests/subsys/fs/zms/testcase.yaml new file mode 100644 index 00000000000000..bdee4529f2af8c --- /dev/null +++ b/tests/subsys/fs/zms/testcase.yaml @@ -0,0 +1,28 @@ +common: + tags: zms +tests: + filesystem.zms: + platform_allow: + - qemu_x86 + filesystem.zms.0x00: + extra_args: DTC_OVERLAY_FILE=boards/qemu_x86_ev_0x00.overlay + platform_allow: qemu_x86 + filesystem.zms.sim.no_erase: + extra_args: CONFIG_FLASH_SIMULATOR_EXPLICIT_ERASE=n + platform_allow: qemu_x86 + filesystem.zms.sim.corrupt_close: + extra_args: + - CONFIG_FLASH_SIMULATOR_EXPLICIT_ERASE=y + - CONFIG_FLASH_SIMULATOR_DOUBLE_WRITES=y + platform_allow: qemu_x86 + filesystem.zms.cache: + extra_args: + - CONFIG_ZMS_LOOKUP_CACHE=y + - CONFIG_ZMS_LOOKUP_CACHE_SIZE=64 + platform_allow: native_sim + filesystem.zms.data_crc: + extra_args: + - CONFIG_ZMS_DATA_CRC=y + platform_allow: + - native_sim + - qemu_x86