diff --git a/redGrapes/util/chunked_list.hpp b/redGrapes/util/chunked_list.hpp index 92d2a531..c2e2b33f 100644 --- a/redGrapes/util/chunked_list.hpp +++ b/redGrapes/util/chunked_list.hpp @@ -79,7 +79,7 @@ template < > struct ChunkedList { - using chunk_offset_t = int16_t; + using chunk_offset_t = uint16_t; using refcount_t = uint16_t; struct Item @@ -163,14 +163,11 @@ struct ChunkedList struct Chunk { - /* counts the number of alive elements in this chunk. - * Whenever `item_count` reaches zero, the chunk will be deleted. - * `item_count` starts with 1 to keep the chunk at least until - * its full capacity is used. This initial offset is - * compensated by not increasing the count when the last - * element is inserted. + /* counts the number of freed elements in this chunk. + * Whenever `freed_chunks` reaches `chunk_size`, i.e. every + * slot of this chunk has been released, the chunk is deleted. */ - std::atomic< chunk_offset_t > item_count{ 0 }; + std::atomic< chunk_offset_t > freed_chunks{ 0 }; /* lowest index with free slot that can * be used to add a new element @@ -409,9 +406,10 @@ struct ChunkedList ChunkedList( size_t est_chunk_size = 32 ) : ChunkedList( Allocator(), - est_chunk_size + est_chunk_size * sizeof(Item) ) - {} + { + } ChunkedList( Allocator && alloc, @@ -425,6 +423,9 @@ struct ChunkedList size_t items_capacity = (chunks.get_chunk_capacity() - sizeof(Chunk)); this->chunk_size = items_capacity / sizeof(Item); assert( chunk_size < std::numeric_limits< chunk_offset_t >::max() ); + + // start with one batch of chunks + chunks.allocate_item(); } ChunkedList( ChunkedList && other ) = default; @@ -440,7 +441,7 @@ struct ChunkedList */ void release_chunk( typename memory::AtomicList< Chunk, Allocator >::MutBackwardIterator chunk ) { - if( chunk->item_count.fetch_sub(1) == 0 ) + if( chunk->freed_chunks.fetch_add(1) == chunk_size - 1u ) chunks.erase( chunk ); } @@ -453,25 +454,30 @@ struct ChunkedList auto chunk = chunks.rbegin(); if( chunk != chunks.rend() ) { - if( chunk->item_count.fetch_add(1) 
< chunk_size ) - { - unsigned chunk_off = chunk->next_idx.fetch_add(1); - - if( chunk_off < chunk_size ) - { - + unsigned chunk_off = chunk->next_idx.fetch_add(1); + + if( chunk_off < chunk_size ) + { chunk->items()[ chunk_off ] = item; chunk->last_idx ++; + if( chunk_off == chunk_size - 1) + { + // the thread that took the last slot of this chunk must + // allocate the next batch of chunks return MutBackwardIterator( chunk_size, chunk, chunk_off ); - } - } - - release_chunk(chunk); + } + else + { + // roll back the failed increment so the counter cannot overflow + chunk->next_idx--; + } + } + else + { + throw std::runtime_error("chunk_list: invalid state, there should always be at least one chunk available."); } - - auto prev_chunk = chunks.allocate_item(); - if( prev_chunk != chunks.rend() ) - release_chunk( prev_chunk ); } }