From 7262258d76061e5a8318f1251fa8261257577120 Mon Sep 17 00:00:00 2001 From: Riadh Ghaddab Date: Wed, 30 Oct 2024 14:45:27 +0100 Subject: [PATCH] fs: zms: multiple style fixes from previous PR review This resolves some addressed comments in this PR zephyrproject-rtos#77930 as well as this PR zephyrproject-rtos#80407 Signed-off-by: Riadh Ghaddab --- doc/services/storage/zms/zms.rst | 300 ++++++++++++++----------------- doc/zephyr.doxyfile.in | 1 + include/zephyr/fs/zms.h | 73 ++++++-- subsys/fs/zms/CMakeLists.txt | 2 +- subsys/fs/zms/Kconfig | 25 ++- subsys/fs/zms/zms_priv.h | 60 ++++--- 6 files changed, 238 insertions(+), 223 deletions(-) diff --git a/doc/services/storage/zms/zms.rst b/doc/services/storage/zms/zms.rst index 02fed3cf77c4db..1f5a3f97fdeabe 100644 --- a/doc/services/storage/zms/zms.rst +++ b/doc/services/storage/zms/zms.rst @@ -15,15 +15,15 @@ pairs until it is full. The key-value pair is divided into two parts: - The key part is written in an ATE (Allocation Table Entry) called "ID-ATE" which is stored - starting from the bottom of the sector -- The value part is defined as "DATA" and is stored raw starting from the top of the sector + starting from the bottom of the sector. +- The value part is defined as "data" and is stored raw starting from the top of the sector. -Additionally, for each sector we store at the last positions Header-ATEs which are ATEs that +Additionally, for each sector we store at the last positions header ATEs which are ATEs that are needed for the sector to describe its status (closed, open) and the current version of ZMS. 
When the current sector is full we verify first that the following sector is empty, we garbage -collect the N+2 sector (where N is the current sector number) by moving the valid ATEs to the -N+1 empty sector, we erase the garbage collected sector and then we close the current sector by +collect the sector N+2 (where N is the current sector number) by moving the valid ATEs to the +N+1 empty sector, we erase the garbage-collected sector and then we close the current sector by writing a garbage_collect_done ATE and the close ATE (one of the header entries). Afterwards we move forward to the next sector and start writing entries again. @@ -60,50 +60,50 @@ A sector is organized in this form (example with 3 sectors): - . - . * - . - - ATE_b2 - - ATE_c2 - * - ATE_a2 - - ATE_b1 - - ATE_c1 - * - ATE_a1 - - ATE_b0 - - ATE_c0 - * - ATE_a0 - - GC_done - - GC_done - * - Close (cyc=1) - - Close (cyc=1) - - Close (cyc=1) - * - Empty (cyc=1) - - Empty (cyc=2) - - Empty (cyc=2) + - ID ATE_b2 + - ID ATE_c2 + * - ID ATE_a2 + - ID ATE_b1 + - ID ATE_c1 + * - ID ATE_a1 + - ID ATE_b0 + - ID ATE_c0 + * - ID ATE_a0 + - GC_done ATE + - GC_done ATE + * - Close ATE (cyc=1) + - Close ATE (cyc=1) + - Close ATE (cyc=1) + * - Empty ATE (cyc=1) + - Empty ATE (cyc=2) + - Empty ATE (cyc=2) Definition of each element in the sector ======================================== -``Empty ATE:`` is written when erasing a sector (last position of the sector). +``Empty ATE`` is written when erasing a sector (last position of the sector). -``Close ATE:`` is written when closing a sector (second to last position of the sector). +``Close ATE`` is written when closing a sector (second to last position of the sector). -``GC_done ATE:`` is written to indicate that the next sector has been already garbage -collected. This ATE could be in any position of the sector. +``GC_done ATE`` is written to indicate that the next sector has already been garbage-collected. +This ATE could be at any position of the sector. 
-``ID-ATE:`` are entries that contain a 32 bits Key and describe where the data is stored, its -size and its crc32 +``ID ATE`` are entries that contain a 32-bit key and describe where the data is stored, its +size and its CRC32. -``Data:`` is the actual value associated to the ID-ATE +``Data`` is the actual value associated to the ID-ATE. How does ZMS work? ****************** -Mounting the Storage system +Mounting the storage system =========================== -Mounting the storage starts by getting the flash parameters, checking that the file system +Mounting the storage system starts by getting the flash parameters, checking that the file system properties are correct (sector_size, sector_count ...) then calling the zms_init function to make the storage ready. -To mount the filesystem some elements in the zms_fs structure must be initialized. +To mount the filesystem some elements in the ``zms_fs`` structure must be initialized. .. code-block:: c @@ -125,43 +125,44 @@ To mount the filesystem some elements in the zms_fs structure must be initialize Initialization ============== -As ZMS has a fast-forward write mechanism, we must find the last sector and the last pointer of +As ZMS has a fast-forward write mechanism, it must find the last sector and the last pointer of the entry where it stopped the last time. It must look for a closed sector followed by an open one, then within the open sector, it finds -(recover) the last written ATE (Allocation Table Entry). +(recovers) the last written ATE. After that, it checks that the sector after this one is empty, or it will erase it. -ZMS ID-Data write +ZMS ID/data write =================== -To avoid rewriting the same data with the same ID again, it must look in all the sectors if the -same ID exist then compares its data, if the data is identical no write is performed. -If we must perform a write, then an ATE and Data (if not a delete) are written in the sector. 
-If the sector is full (cannot hold the current data + ATE) we have to move to the next sector, +To avoid rewriting the same data with the same ID again, ZMS must look in all the sectors if the +same ID exists and then compares its data. If the data is identical, no write is performed. +If it must perform a write, then an ATE and the data (if the operation is not a delete) are written +in the sector. +If the sector is full (cannot hold the current data + ATE), ZMS has to move to the next sector, garbage collect the sector after the newly opened one then erase it. -Data size that is smaller or equal to 8 bytes are written within the ATE. +Data whose size is smaller or equal to 8 bytes are written within the ATE. ZMS ID/data read (with history) =============================== -By default it looks for the last data with the same ID by browsing through all stored ATEs from +By default ZMS looks for the last data with the same ID by browsing through all stored ATEs from the most recent ones to the oldest ones. If it finds a valid ATE with a matching ID it retrieves its data and returns the number of bytes that were read. -If history count is provided that is different than 0, older data with same ID is retrieved. +If a history count is provided and different than 0, older data with same ID is retrieved. ZMS free space calculation ========================== ZMS can also return the free space remaining in the partition. -However, this operation is very time consuming and needs to browse all valid ATEs in all sectors -of the partition and for each valid ATE try to find if an older one exist. -It is not recommended for application to use this function often, as it is time consuming and +However, this operation is very time-consuming as it needs to browse through all valid ATEs +in all sectors of the partition and for each valid ATE try to find if an older one exists. 
+It is not recommended for applications to use this function often, as it is time-consuming and could slow down the calling thread. The cycle counter ================= -Each sector has a lead cycle counter which is a uin8_t that is used to validate all the other +Each sector has a lead cycle counter which is a ``uint8_t`` that is used to validate all the other ATEs. The lead cycle counter is stored in the empty ATE. To become valid, an ATE must have the same cycle counter as the one stored in the empty ATE. @@ -179,88 +180,68 @@ counter as the empty ATE. When closing a sector, all the remaining space that has not been used is filled with garbage data to avoid having old ATEs with a valid cycle counter. -Triggering Garbage collection +Triggering garbage collection ============================= Some applications need to make sure that storage writes have a maximum defined latency. -When calling a ZMS write, the current sector could be almost full and we need to trigger the GC -to switch to the next sector. -This operation is time consuming and it will cause some applications to not meet their real time +When calling ZMS to make a write, the current sector could be almost full such that ZMS needs to +trigger the GC to switch to the next sector. +This operation is time-consuming and will cause some applications to not meet their real time constraints. ZMS adds an API for the application to get the current remaining free space in a sector. -The application could then decide when needed to switch to the next sector if the current one is -almost full and of course it will trigger the garbage collection on the next sector. +The application could then decide when to switch to the next sector if the current one is almost +full. This will of course trigger the garbage collection operation on the next sector. This will guarantee the application that the next write won't trigger the garbage collection. 
ATE (Allocation Table Entry) structure ====================================== -An entry has 16 bytes divided between these variables : +An entry has 16 bytes divided between these fields: -.. code-block:: c +See the :c:struct:`zms_ate` structure. - struct zms_ate { - uint8_t crc8; /* crc8 check of the entry */ - uint8_t cycle_cnt; /* cycle counter for non-erasable devices */ - uint16_t len; /* data len within sector */ - uint32_t id; /* data id */ - union { - uint8_t data[8]; /* used to store small size data */ - struct { - uint32_t offset; /* data offset within sector */ - union { - uint32_t data_crc; /* crc for data */ - uint32_t metadata; /* Used to store metadata information - * such as storage version. - */ - }; - }; - }; - } __packed; - -.. note:: The CRC of the data is checked only when the whole the element is read. +.. note:: The CRC of the data is checked only when a full read of the data is made. The CRC of the data is not checked for a partial read, as it is computed for the whole element. -.. note:: Enabling the CRC feature on previously existing ZMS content without CRC enabled - will make all existing data invalid. - -.. _free-space: +.. warning:: Enabling the CRC feature on previously existing ZMS content that did not have it + enabled will make all existing data invalid. Available space for user data (key-value pairs) *********************************************** -For both scenarios ZMS should always have an empty sector to be able to perform the -garbage collection (GC). +ZMS always needs an empty sector to be able to perform the garbage collection (GC). So, if we suppose that 4 sectors exist in a partition, ZMS will only use 3 sectors to store -Key-value pairs and keep one sector empty to be able to launch GC. +key-value pairs and keep one sector empty to be able to perform GC. The empty sector will rotate between the 4 sectors in the partition. -.. 
note:: The maximum single data length that could be written at once in a sector is 64K - (This could change in future versions of ZMS) +.. note:: The maximum single data length that can be written at once in a sector is 64K + (this could change in future versions of ZMS). Small data values ================= -Values smaller than 8 bytes will be stored within the entry (ATE) itself, without writing data -at the top of the sector. +Values smaller than or equal to 8 bytes will be stored within the entry (ATE) itself, without +writing data at the top of the sector. ZMS has an entry size of 16 bytes which means that the maximum available space in a partition to -store data is computed in this scenario as : +store data is computed in this scenario as: .. math:: - \small\frac{(NUM\_SECTORS - 1) \times (SECTOR\_SIZE - (5 \times ATE\_SIZE))}{2} + \small\frac{(NUM\_SECTORS - 1) \times (SECTOR\_SIZE - (5 \times ATE\_SIZE)) \times (DATA\_SIZE)}{ATE\_SIZE} Where: -``NUM_SECTOR:`` Total number of sectors +``NUM_SECTOR``: Total number of sectors + +``SECTOR_SIZE``: Size of the sector -``SECTOR_SIZE:`` Size of the sector +``ATE_SIZE``: 16 bytes -``ATE_SIZE:`` 16 bytes +``(5 * ATE_SIZE)``: Reserved ATEs for header and delete items -``(5 * ATE_SIZE):`` Reserved ATEs for header and delete items +``DATA_SIZE``: Size of the small data values (range from 1 to 8) -For example for 4 sectors of 1024 bytes, free space for data is :math:`\frac{3 \times 944}{2} = 1416 \, \text{ bytes}`. +For example for 4 sectors of 1024 bytes, free space for 8-byte length data is :math:`\frac{3 \times 944 \times 8}{16} = 1416 \, \text{ bytes}`. Large data values ================= @@ -274,67 +255,66 @@ Let's take an example: For a partition that has 4 sectors of 1024 bytes and for data size of 64 bytes. Only 3 sectors are available for writes with a capacity of 944 bytes each. 
-Each Key-value pair needs an extra 16 bytes for ATE which makes it possible to store 11 pairs -in each sectors (:math:`\frac{944}{80}`). -Total data that could be stored in this partition for this case is :math:`11 \times 3 \times 64 = 2112 \text{ bytes}` - -.. _wear-leveling: +Each key-value pair needs an extra 16 bytes for the ATE, which makes it possible to store 11 pairs +in each sector (:math:`\frac{944}{80}`). +Total data that could be stored in this partition for this case is :math:`11 \times 3 \times 64 = 2112 \text{ bytes}`. Wear leveling ************* This storage system is optimized for devices that do not require an erase. -Using storage systems that rely on an erase-value (NVS as an example) will need to emulate the -erase with write operations. This will cause a significant decrease in the life expectancy of -these devices and will cause more delays for write operations and for initialization. -ZMS uses a cycle count mechanism that avoids emulating erase operation for these devices. +Storage systems that rely on an erase value (NVS as an example) need to emulate the erase with +write operations. This causes a significant decrease in the life expectancy of these devices +as well as more delays for write operations and initialization of the device when it is empty. +ZMS uses a cycle count mechanism that avoids emulating erase operations for these devices. It also guarantees that every memory location is written only once for each cycle of sector write. -As an example, to erase a 4096 bytes sector on a non-erasable device using NVS, 256 flash writes -must be performed (supposing that write-block-size=16 bytes), while using ZMS only 1 write of -16 bytes is needed. This operation is 256 times faster in this case. +As an example, to erase a 4096-byte sector on devices that do not require an erase operation +using NVS, 256 flash writes must be performed (supposing that ``write-block-size`` = 16 bytes), while +using ZMS, only 1 write of 16 bytes is needed. 
This operation is 256 times faster in this case. -Garbage collection operation is also adding some writes to the memory cell life expectancy as it -is moving some blocks from one sector to another. +The garbage collection operation also reduces the memory cell life expectancy as it performs write +operations when moving blocks from one sector to another. To make the garbage collector not affect the life expectancy of the device it is recommended -to correctly dimension the partition size. Its size should be the double of the maximum size of -data (including extra headers) that could be written in the storage. +to dimension the partition appropriately. Its size should be the double of the maximum size of +data (including headers) that could be written in the storage. -See :ref:`free-space`. +See `Available space for user data <#Available-space-for-user-data-key-value-pairs>`_. Device lifetime calculation =========================== -Storage devices whether they are classical Flash or new technologies like RRAM/MRAM has a limited -life expectancy which is determined by the number of times memory cells can be erased/written. +Storage devices, whether they are classical flash or new technologies like RRAM/MRAM, have a +limited life expectancy which is determined by the number of times memory cells can be +erased/written. Flash devices are erased one page at a time as part of their functional behavior (otherwise -memory cells cannot be overwritten) and for non-erasable storage devices memory cells can be -overwritten directly. +memory cells cannot be overwritten), and for storage devices that do not require an erase +operation, memory cells can be overwritten directly. A typical scenario is shown here to calculate the life expectancy of a device: -Let's suppose that we store an 8 bytes variable using the same ID but its content changes every +Let's suppose that we store an 8-byte variable using the same ID but its content changes every minute. 
The partition has 4 sectors with 1024 bytes each. Each write of the variable requires 16 bytes of storage. As we have 944 bytes available for ATEs for each sector, and because ZMS is a fast-forward storage system, we are going to rewrite the first location of the first sector after :math:`\frac{(944 \times 4)}{16} = 236 \text{ minutes}`. -In addition to the normal writes, garbage collector will move the still valid data from old -sectors to new ones. +In addition to the normal writes, the garbage collector will move the data that is still valid +from old sectors to new ones. As we are using the same ID and a big partition size, no data will be moved by the garbage collector in this case. -For storage devices that could be written 20000 times, the storage will last about -4.720.000 minutes (~9 years). +For storage devices that can be written 20 000 times, the storage will last about +4 720 000 minutes (~9 years). To make a more general formula we must first compute the effective used size in ZMS by our typical set of data. -For id/data pair with data <= 8 bytes, effective_size is 16 bytes -For id/data pair with data > 8 bytes, effective_size is 16 bytes + sizeof(data) -Let's suppose that total_effective_size is the total size of the set of data that is written in -the storage and that the partition is well dimensioned (double of the effective size) to avoid +For ID/data pairs with data <= 8 bytes, ``effective_size`` is 16 bytes. +For ID/data pairs with data > 8 bytes, ``effective_size`` is ``16 + sizeof(data)`` bytes. +Let's suppose that ``total_effective_size`` is the total size of the data that is written in +the storage and that the partition is sized appropriately (double of the effective size) to avoid having the garbage collector moving blocks all the time. -The expected life of the device in minutes is computed as : +The expected lifetime of the device in minutes is computed as: .. 
math:: @@ -342,11 +322,11 @@ The expected life of the device in minutes is computed as : Where: -``SECTOR_EFFECTIVE_SIZE``: is the size sector - header_size(80 bytes) +``SECTOR_EFFECTIVE_SIZE``: The sector size - header size (80 bytes) -``SECTOR_NUMBER``: is the number of sectors +``SECTOR_NUMBER``: The number of sectors -``MAX_NUM_WRITES``: is the life expectancy of the storage device in number of writes +``MAX_NUM_WRITES``: The life expectancy of the storage device in number of writes ``TOTAL_EFFECTIVE_SIZE``: Total effective size of the set of written data @@ -360,15 +340,16 @@ such as low latency and bigger storage space. Existing features ================= -Version1 --------- -- Supports non-erasable devices (only one write operation to erase a sector) -- Supports large partition size and sector size (64 bits address space) -- Supports 32-bit IDs to store ID/Value pairs -- Small sized data ( <= 8 bytes) are stored in the ATE itself -- Built-in Data CRC32 (included in the ATE) -- Versionning of ZMS (to handle future evolution) -- Supports large write-block-size (Only for platforms that need this) +Version 1 +--------- +- Supports storage devices that do not require an erase operation (only one write operation + to invalidate a sector) +- Supports large partition and sector sizes (64-bit address space) +- Supports 32-bit IDs +- Small-sized data (<= 8 bytes) are stored in the ATE itself +- Built-in data CRC32 (included in the ATE) +- Versioning of ZMS (to handle future evolutions) +- Supports large ``write-block-size`` (only for platforms that need it) Future features =============== @@ -395,7 +376,7 @@ functionality: :ref:`NVS ` and :ref:`FCB `. Which one to use in your application will depend on your needs and the hardware you are using, and this section provides information to help make a choice. 
-- If you are using a non-erasable technology device like RRAM or MRAM, :ref:`ZMS ` is definitely the +- If you are using devices that do not require an erase operation like RRAM or MRAM, :ref:`ZMS ` is definitely the best fit for your storage subsystem as it is designed to avoid emulating erase operation using large block writes for these devices and replaces it with a single write call. - For devices with large write_block_size and/or needs a sector size that is different than the @@ -413,7 +394,7 @@ and this section provides information to help make a choice. More generally to make the right choice between NVS and ZMS, all the blockers should be first verified to make sure that the application could work with one subsystem or the other, then if both solutions could be implemented, the best choice should be based on the calculations of the -life expectancy of the device described in this section: :ref:`wear-leveling`. +life expectancy of the device described in this section: `Wear leveling <#wear-leveling>`_. Recommendations to increase performance *************************************** @@ -421,44 +402,41 @@ Recommendations to increase performance Sector size and count ===================== -- The total size of the storage partition should be well dimensioned to achieve the best - performance for ZMS. +- The total size of the storage partition should be set appropriately to achieve the best + performance with ZMS. All the information regarding the effectively available free space in ZMS can be found - in the documentation. See :ref:`free-space`. - We recommend choosing a storage partition that can hold double the size of the key-value pairs + in the documentation. See `Available space for user data <#Available-space-for-user-data-key-value-pairs>`_. + It's recommended to choose a storage partition size that is double the size of the key-value pairs that will be written in the storage. 
-- The size of a sector needs to be dimensioned to hold the maximum data length that will be stored. - Increasing the size of a sector will slow down the garbage collection operation which will - occur less frequently. - Decreasing its size, in the opposite, will make the garbage collection operation faster - which will occur more frequently. +- The sector size needs to be set such that a sector can fit the maximum data size that will be + stored. + Increasing the sector size will slow down the garbage collection operation and make it occur + less frequently. + Decreasing its size, on the opposite, will make the garbage collection operation faster but also + occur more frequently. - For some subsystems like :ref:`Settings `, all path-value pairs are split into two ZMS entries (ATEs). - The header needed by the two entries should be accounted when computing the needed storage space. -- Using small data to store in the ZMS entries can increase the performance, as this data is - written within the entry header. + The headers needed by the two entries should be accounted for when computing the needed storage + space. +- Storing small data (<= 8 bytes) in ZMS entries can increase the performance, as this data is + written within the entry. For example, for the :ref:`Settings ` subsystem, choosing a path name that is less than or equal to 8 bytes can make reads and writes faster. -Dimensioning cache -================== +Cache size +========== -- When using ZMS API directly, the recommended cache size should be, at least, equal to - the number of different entries that will be written in the storage. +- When using the ZMS API directly, the recommendation for the cache size is to make it at least + equal to the number of different entries that will be written in the storage. - Each additional cache entry will add 8 bytes to your RAM usage. Cache size should be carefully chosen. 
- If you use ZMS through :ref:`Settings `, you have to take into account that each Settings entry is - divided into two ZMS entries. The recommended cache size should be, at least, twice the number - of Settings entries. - -Sample -****** - -A sample of how ZMS can be used is supplied in :zephyr:code-sample:`zms`. + divided into two ZMS entries. The recommendation for the cache size is to make it at least + twice the number of Settings entries. API Reference ************* -The ZMS subsystem APIs are provided by ``zms.h``: +The ZMS API is provided by ``zms.h``: .. doxygengroup:: zms_data_structures diff --git a/doc/zephyr.doxyfile.in b/doc/zephyr.doxyfile.in index abd4440f5aa160..6db454ddf8217c 100644 --- a/doc/zephyr.doxyfile.in +++ b/doc/zephyr.doxyfile.in @@ -980,6 +980,7 @@ INPUT = @ZEPHYR_BASE@/doc/_doxygen/mainpage.md \ @ZEPHYR_BASE@/subsys/testsuite/include/ \ @ZEPHYR_BASE@/subsys/testsuite/ztest/include/ \ @ZEPHYR_BASE@/subsys/secure_storage/include/ \ + @ZEPHYR_BASE@/subsys/fs/zms/zms_priv.h \ # This tag can be used to specify the character encoding of the source files # that Doxygen parses. Internally Doxygen uses the UTF-8 encoding. Doxygen uses diff --git a/include/zephyr/fs/zms.h b/include/zephyr/fs/zms.h index 0f0fbb82cc94cf..27d376394278c5 100644 --- a/include/zephyr/fs/zms.h +++ b/include/zephyr/fs/zms.h @@ -80,8 +80,13 @@ struct zms_fs { * @brief Mount a ZMS file system onto the device specified in `fs`. * * @param fs Pointer to the file system. - * @retval 0 Success - * @retval -ERRNO Negative errno code on error + * + * @retval 0 on success. + * @retval -EDEADLK if the detected file system is not ZMS. + * @retval -ENOEXEC if ZMS version is not supported. + * @retval -EINVAL if any of the flash parameters or the sector layout is invalid. + * @retval -ENXIO if there is a device error. + * @retval -EIO if there is a memory read/write error. 
 */ int zms_mount(struct zms_fs *fs); @@ -89,8 +94,11 @@ int zms_mount(struct zms_fs *fs); * @brief Clear the ZMS file system from device. * * @param fs Pointer to the file system. - * @retval 0 Success - * @retval -ERRNO Negative errno code on error + * + * @retval 0 on success. + * @retval -EACCES if ZMS is still not initialized. + * @retval -ENXIO if there is a device error. + * @retval -EIO if there is a memory read/write error. */ int zms_clear(struct zms_fs *fs); @@ -102,14 +110,20 @@ int zms_clear(struct zms_fs *fs); * entry and an entry with data of length 0. * * @param fs Pointer to the file system. - * @param id ID of the entry to be written - * @param data Pointer to the data to be written - * @param len Number of bytes to be written (maximum 64 KiB) + * @param id ID of the entry to be written. + * @param data Pointer to the data to be written. + * @param len Number of bytes to be written (maximum 64 KiB). * * @return Number of bytes written. On success, it will be equal to the number of bytes requested * to be written or 0. * When a rewrite of the same data already stored is attempted, nothing is written to flash, * thus 0 is returned. On error, returns negative value of error codes defined in `errno.h`. + * @retval >=0 on success, number of bytes written. + * @retval -EACCES if ZMS is still not initialized. + * @retval -ENXIO if there is a device error. + * @retval -EIO if there is a memory read/write error. + * @retval -EINVAL if len is an invalid value. + * @retval -ENOSPC if no space is left on device. */ ssize_t zms_write(struct zms_fs *fs, uint32_t id, const void *data, size_t len); @@ -117,9 +131,12 @@ ssize_t zms_write(struct zms_fs *fs, uint32_t id, const void *data, size_t len); * @brief Delete an entry from the file system * * @param fs Pointer to the file system. - * @param id ID of the entry to be deleted - * @retval 0 Success - * @retval -ERRNO Negative errno code on error + * @param id ID of the entry to be deleted. 
+ * + * @retval 0 on success. + * @retval -EACCES if ZMS is still not initialized. + * @retval -ENXIO if there is a device error. + * @retval -EIO if there is a memory read/write error. */ int zms_delete(struct zms_fs *fs, uint32_t id); @@ -127,13 +144,17 @@ int zms_delete(struct zms_fs *fs, uint32_t id); * @brief Read an entry from the file system. * * @param fs Pointer to the file system. - * @param id ID of the entry to be read - * @param data Pointer to data buffer - * @param len Number of bytes to read at most + * @param id ID of the entry to be read. + * @param data Pointer to data buffer. + * @param len Number of bytes to read at most. * * @return Number of bytes read. On success, it will be equal to the number of bytes requested * to be read or less than that if the stored data has a smaller size than the requested one. * On error, returns negative value of error codes defined in `errno.h`. + * @retval >=0 on success, number of bytes read. + * @retval -EACCES if ZMS is still not initialized. + * @retval -EIO if there is a memory read/write error. + * @retval -ENOENT if there is no entry with the given `id`. */ ssize_t zms_read(struct zms_fs *fs, uint32_t id, void *data, size_t len); @@ -141,26 +162,34 @@ ssize_t zms_read(struct zms_fs *fs, uint32_t id, void *data, size_t len); * @brief Read a history entry from the file system. * * @param fs Pointer to the file system. - * @param id ID of the entry to be read - * @param data Pointer to data buffer - * @param len Number of bytes to be read + * @param id ID of the entry to be read. + * @param data Pointer to data buffer. + * @param len Number of bytes to be read. * @param cnt History counter: 0: latest entry, 1: one before latest ... * * @return Number of bytes read. On success, it will be equal to the number of bytes requested * to be read. When the return value is larger than the number of bytes requested to read this * indicates not all bytes were read, and more data is available. 
On error, returns negative * value of error codes defined in `errno.h`. + * @retval >=0 on success, number of bytes read. + * @retval -EACCES if ZMS is still not initialized. + * @retval -EIO if there is a memory read/write error. + * @retval -ENOENT if there is no entry with the given id and history counter. */ ssize_t zms_read_hist(struct zms_fs *fs, uint32_t id, void *data, size_t len, uint32_t cnt); /** - * @brief Gets the length of the data that is stored in an entry with a given ID + * @brief Gets the length of the data that is stored in an entry with a given `id` * * @param fs Pointer to the file system. * @param id ID of the entry whose data length to retrieve. * * @return Data length contained in the ATE. On success, it will be equal to the number of bytes * in the ATE. On error, returns negative value of error codes defined in `errno.h`. + * @retval >=0 on success, length of the entry with the given `id`. + * @retval -EACCES if ZMS is still not initialized. + * @retval -EIO if there is a memory read/write error. + * @retval -ENOENT if there is no entry with the given id and history counter. */ ssize_t zms_get_data_length(struct zms_fs *fs, uint32_t id); @@ -173,6 +202,9 @@ ssize_t zms_get_data_length(struct zms_fs *fs, uint32_t id); * still be written to the file system. * Calculating the free space is a time-consuming operation, especially on SPI flash. * On error, returns negative value of error codes defined in `errno.h`. + * @retval >=0 on success, number of free bytes. + * @retval -EACCES if ZMS is still not initialized. + * @retval -EIO if there is a memory read/write error. */ ssize_t zms_calc_free_space(struct zms_fs *fs); @@ -181,7 +213,8 @@ ssize_t zms_calc_free_space(struct zms_fs *fs); * * @param fs Pointer to the file system. * - * @return Number of free bytes. + * @retval >=0 Number of free bytes in the currently active sector + * @retval -EACCES if ZMS is still not initialized. 
*/ size_t zms_active_sector_free_space(struct zms_fs *fs); @@ -196,7 +229,9 @@ size_t zms_active_sector_free_space(struct zms_fs *fs); * * @param fs Pointer to the file system. * - * @return 0 on success. On error, returns negative value of error codes defined in `errno.h`. + * @retval 0 on success. + * @retval -EACCES if ZMS is still not initialized. + * @retval -EIO if there is a memory read/write error. */ int zms_sector_use_next(struct zms_fs *fs); diff --git a/subsys/fs/zms/CMakeLists.txt b/subsys/fs/zms/CMakeLists.txt index b6db8a3f57fa9a..91e4651c3f6620 100644 --- a/subsys/fs/zms/CMakeLists.txt +++ b/subsys/fs/zms/CMakeLists.txt @@ -1,3 +1,3 @@ -#SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: Apache-2.0 zephyr_sources(zms.c) diff --git a/subsys/fs/zms/Kconfig b/subsys/fs/zms/Kconfig index dd3c5a184e1cf3..88a31d615a25ca 100644 --- a/subsys/fs/zms/Kconfig +++ b/subsys/fs/zms/Kconfig @@ -1,14 +1,16 @@ -#Copyright (c) 2024 BayLibre SAS +# Copyright (c) 2024 BayLibre SAS -#SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: Apache-2.0 -#Zephyr Memory Storage ZMS +# Zephyr Memory Storage ZMS config ZMS bool "Zephyr Memory Storage" select CRC help - Enable support of Zephyr Memory Storage. + Enable Zephyr Memory Storage, which is a key-value storage system designed to work with + all types of non-volatile storage technologies. + It supports classical on-chip NOR flash as well as new technologies like RRAM and MRAM. if ZMS @@ -20,19 +22,16 @@ config ZMS_LOOKUP_CACHE table entry (ATE) for all ZMS IDs that fall into that cache position. config ZMS_LOOKUP_CACHE_SIZE - int "ZMS Storage lookup cache size" + int "ZMS lookup cache size" default 128 range 1 65536 depends on ZMS_LOOKUP_CACHE help - Number of entries in ZMS lookup cache. - It is recommended that it should be a power of 2. - Every additional entry in cache will add 8 bytes in RAM + Number of entries in the ZMS lookup cache. + Every additional entry in cache will use 8 bytes of RAM. 
config ZMS_DATA_CRC - bool "ZMS DATA CRC" - help - Enables DATA CRC + bool "ZMS data CRC" config ZMS_CUSTOMIZE_BLOCK_SIZE bool "Customize the size of the buffer used internally for reads and writes" @@ -40,8 +39,8 @@ config ZMS_CUSTOMIZE_BLOCK_SIZE ZMS uses an internal buffer to read/write and compare stored data. Increasing the size of this buffer should be done carefully in order to not overflow the stack. - Increasing this buffer means as well that ZMS could work with storage devices - that have larger write-block-size which decreases ZMS performance + Increasing it makes ZMS able to work with storage devices + that have a larger `write-block-size` (which decreases the performance of ZMS). config ZMS_CUSTOM_BLOCK_SIZE int "ZMS internal buffer size" diff --git a/subsys/fs/zms/zms_priv.h b/subsys/fs/zms/zms_priv.h index 428ff6babca862..e2cbf5f08bb59d 100644 --- a/subsys/fs/zms/zms_priv.h +++ b/subsys/fs/zms/zms_priv.h @@ -8,15 +8,11 @@ #ifndef __ZMS_PRIV_H_ #define __ZMS_PRIV_H_ -#ifdef __cplusplus -extern "C" { -#endif - /* - * MASKS AND SHIFT FOR ADDRESSES - * an address in zms is an uint64_t where: - * high 4 bytes represent the sector number - * low 4 bytes represent the offset in a sector + * MASKS AND SHIFT FOR ADDRESSES. 
+ * An address in zms is an uint64_t where: + * - high 4 bytes represent the sector number + * - low 4 bytes represent the offset in a sector */ #define ADDR_SECT_MASK GENMASK64(63, 32) #define ADDR_SECT_SHIFT 32 @@ -44,34 +40,40 @@ extern "C" { #define ZMS_INVALID_SECTOR_NUM -1 #define ZMS_DATA_IN_ATE_SIZE 8 +/** + * @ingroup zms_data_structures + * ZMS Allocation Table Entry (ATE) structure + */ struct zms_ate { - uint8_t crc8; /* crc8 check of the entry */ - uint8_t cycle_cnt; /* cycle counter for non erasable devices */ - uint16_t len; /* data len within sector */ - uint32_t id; /* data id */ + /** crc8 check of the entry */ + uint8_t crc8; + /** cycle counter for non erasable devices */ + uint8_t cycle_cnt; + /** data len within sector */ + uint16_t len; + /** data id */ + uint32_t id; union { - uint8_t data[8]; /* used to store small size data */ + /** data field used to store small sized data */ + uint8_t data[8]; struct { - uint32_t offset; /* data offset within sector */ + /** data offset within sector */ + uint32_t offset; union { - uint32_t data_crc; /* - * crc for data: The data CRC is checked only - * when the whole data of the element is read. - * The data CRC is not checked for a partial - * read, as it is computed for the complete - * set of data. - */ - uint32_t metadata; /* - * Used to store metadata information - * such as storage version. - */ + /** + * crc for data: The data CRC is checked only when the whole data + * of the element is read. + * The data CRC is not checked for a partial read, as it is computed + * for the complete set of data. + */ + uint32_t data_crc; + /** + * Used to store metadata information such as storage version. + */ + uint32_t metadata; }; }; }; } __packed; -#ifdef __cplusplus -} -#endif - #endif /* __ZMS_PRIV_H_ */