From 6f81e4183cee1fe3b1307e4eacc493a85c9ca2d8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ren=C3=A9=20Widera?=
Date: Tue, 12 Dec 2023 12:04:04 +0100
Subject: [PATCH 1/2] fix broken example

redGrapes init and finalize are missing in `1_resources.cpp`
---
 examples/1_resources.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/examples/1_resources.cpp b/examples/1_resources.cpp
index eea46124..49845f88 100644
--- a/examples/1_resources.cpp
+++ b/examples/1_resources.cpp
@@ -5,12 +5,14 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/.
  */
 
+#include <redGrapes/redGrapes.hpp>
 #include <redGrapes/resource/fieldresource.hpp>
 #include <redGrapes/resource/ioresource.hpp>
 #include <iostream>
 
 int main(int, char*[])
 {
+    redGrapes::init(1);
     redGrapes::FieldResource< std::vector<int> > a;
     redGrapes::IOResource< int > b;
     redGrapes::IOResource< int > c;
@@ -35,6 +37,7 @@ int main(int, char*[])
 
     std::cout << "is_serial(user1,user3) = " << redGrapes::ResourceUser::is_serial(user1,user3) << std::endl;
     std::cout << "is_serial(user2,user3) = " << redGrapes::ResourceUser::is_serial(user2,user3) << std::endl;
 
+    redGrapes::finalize();
     return 0;
 }

From bc797f74a3bd8a746fea8270cd766553eb3935f7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ren=C3=A9=20Widera?=
Date: Tue, 12 Dec 2023 12:17:02 +0100
Subject: [PATCH 2/2] fix memory leak

With #44 we tried to fix the memory leak that can occur when more than
one thread allocates a new chunk at the same time. PR #44 did not fix
the issue and introduced a double free instead.
---
 redGrapes/util/chunked_list.hpp | 53 ++++++++++++++++++---------------
 1 file changed, 29 insertions(+), 24 deletions(-)

diff --git a/redGrapes/util/chunked_list.hpp b/redGrapes/util/chunked_list.hpp
index 92d2a531..83392891 100644
--- a/redGrapes/util/chunked_list.hpp
+++ b/redGrapes/util/chunked_list.hpp
@@ -79,7 +79,7 @@ template <
 >
 struct ChunkedList
 {
-    using chunk_offset_t = int16_t;
+    using chunk_offset_t = uint16_t;
     using refcount_t = uint16_t;
 
     struct Item
@@ -163,14 +163,11 @@ struct ChunkedList
 
     struct Chunk
    {
-        /* counts the number of alive elements in this chunk.
-         * Whenever `item_count` reaches zero, the chunk will be deleted.
-         * `item_count` starts with 1 to keep the chunk at least until
-         * its full capacity is used. This initial offset is
-         * compensated by not increasing the count when the last
-         * element is inserted.
+        /* counts the number of freed elements in this chunk.
+         * Whenever `freed_items` reaches the number of items in this chunk,
+         * the chunk will be deleted.
         */
-        std::atomic< chunk_offset_t > item_count{ 0 };
+        std::atomic< chunk_offset_t > freed_items{ 0 };
 
        /* lowest index with free slot that can
         * be used to add a new element
@@ -425,6 +422,9 @@ struct ChunkedList
         size_t items_capacity = (chunks.get_chunk_capacity() - sizeof(Chunk));
         this->chunk_size = items_capacity / sizeof(Item);
         assert( chunk_size < std::numeric_limits< chunk_offset_t >::max() );
+
+        // start with one batch of chunks
+        chunks.allocate_item();
     }
 
     ChunkedList( ChunkedList && other ) = default;
@@ -440,7 +440,7 @@ struct ChunkedList
      */
     void release_chunk( typename memory::AtomicList< Chunk, Allocator >::MutBackwardIterator chunk )
     {
-        if( chunk->item_count.fetch_sub(1) == 0 )
+        if( chunk->freed_items.fetch_add(1) == chunk_size - 1u )
             chunks.erase( chunk );
     }
 
@@ -453,25 +453,30 @@ struct ChunkedList
             auto chunk = chunks.rbegin();
             if( chunk != chunks.rend() )
             {
-                if( chunk->item_count.fetch_add(1) < chunk_size )
-                {
-                    unsigned chunk_off = chunk->next_idx.fetch_add(1);
-
-                    if( chunk_off < chunk_size )
-                    {
-
+                unsigned chunk_off = chunk->next_idx.fetch_add(1);
+
+                if( chunk_off < chunk_size )
+                {
                    chunk->items()[ chunk_off ] = item;
                    chunk->last_idx ++;
 
+                    if( chunk_off == chunk_size - 1)
+                    {
+                        // the thread that took the last item of this chunk must
+                        // allocate the next batch of items
+                        chunks.allocate_item();
+                    }
                    return MutBackwardIterator( chunk_size, chunk, chunk_off );
-                    }
-                }
-
-                release_chunk(chunk);
+                }
+                else
+                {
+                    // avoid overflowing the counter
+                    chunk->next_idx--;
+                }
+            }
+            else
+            {
+                throw std::runtime_error("chunk_list: invalid state, there should always be at least one chunk available.");
             }
-
-            auto prev_chunk = chunks.allocate_item();
-            if( prev_chunk != chunks.rend() )
-                release_chunk( prev_chunk );
         }
     }
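
For reference, the allocation scheme that PATCH 2/2 introduces works as follows: a thread
reserves a slot with next_idx.fetch_add(1); if the chunk turns out to be full, the
increment is rolled back; the thread that reserves the last slot allocates the next chunk;
and freed_items counts releases, so a chunk is erased only after every one of its slots has
been freed. The standalone sketch below illustrates just this pattern; ToyChunk, push and
release are names invented for the illustration and are not part of the redGrapes API.

// toy_chunk.cpp -- minimal sketch of the slot-claiming scheme from PATCH 2/2.
// ToyChunk, push and release are invented for this example (not redGrapes API).
#include <array>
#include <atomic>
#include <cstdio>

constexpr unsigned chunk_size = 4;

struct ToyChunk
{
    std::atomic<unsigned> next_idx{0};    // next free slot, claimed with fetch_add
    std::atomic<unsigned> freed_items{0}; // number of slots released so far
    std::array<int, chunk_size> items{};
};

// Claim one slot and store `value`. The caller that claims the *last* slot is
// told to allocate the next chunk, mirroring the patched ChunkedList::push().
bool push(ToyChunk& chunk, int value, bool& must_allocate_next_chunk)
{
    unsigned off = chunk.next_idx.fetch_add(1);
    must_allocate_next_chunk = false;
    if(off < chunk_size)
    {
        chunk.items[off] = value;
        must_allocate_next_chunk = (off == chunk_size - 1);
        return true;
    }
    chunk.next_idx--; // roll back the failed reservation so the counter cannot overflow
    return false;
}

// Release one slot. Returns true when the last slot was freed,
// i.e. when the chunk may now be erased.
bool release(ToyChunk& chunk)
{
    return chunk.freed_items.fetch_add(1) == chunk_size - 1;
}

int main()
{
    ToyChunk chunk;
    bool allocate_next = false;
    for(int i = 0; i < 5; ++i)
    {
        bool ok = push(chunk, i, allocate_next);
        std::printf("push %d -> %s%s\n", i, ok ? "ok" : "chunk full",
                    allocate_next ? " (this thread allocates the next chunk)" : "");
    }
    for(unsigned i = 0; i < chunk_size; ++i)
        if(release(chunk))
            std::printf("all %u slots freed, chunk can be erased\n", chunk_size);
    return 0;
}

Because each slot index is handed out exactly once and only the thread that took the last
slot allocates the follow-up chunk, chunks are neither leaked nor freed twice, which is the
behaviour the commit message describes.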