diff --git a/redGrapes/util/atomic_list.hpp b/redGrapes/util/atomic_list.hpp
index c0fb1f3f..e26101e6 100644
--- a/redGrapes/util/atomic_list.hpp
+++ b/redGrapes/util/atomic_list.hpp
@@ -159,7 +159,7 @@ struct AtomicList
 
     /* initializes a new chunk
      */
-    void allocate_item()
+    auto allocate_item()
     {
         TRACE_EVENT("Allocator", "AtomicList::allocate_item()");
 
@@ -174,7 +174,7 @@ struct AtomicList
          */
         StaticAlloc chunk_alloc( this->alloc, chunk_size );
         uintptr_t base = (uintptr_t)chunk_alloc.ptr;
-        append_item(
+        return append_item(
             std::allocate_shared< ItemPtr >(
                 chunk_alloc,
 
@@ -188,20 +188,6 @@ struct AtomicList
         );
     }
 
-    /* atomically appends a floating chunk to this list
-     */
-    void append_item( std::shared_ptr< ItemPtr > new_head )
-    {
-        TRACE_EVENT("Allocator", "AtomicList::append_item()");
-        bool append_successful = false;
-        while( ! append_successful )
-        {
-            std::shared_ptr< ItemPtr > old_head = std::atomic_load( &head );
-            std::atomic_store( &new_head->prev, old_head );
-            append_successful = std::atomic_compare_exchange_strong( &head, &old_head, new_head );
-        }
-    }
-
     template < bool is_const = false >
     struct BackwardIterator
     {
@@ -296,6 +282,28 @@ struct AtomicList
     {
         pos.erase();
     }
+
+    /* atomically appends a floating chunk to this list
+     * and returns the previous head to which the new_head
+     * is now linked.
+     */
+    auto append_item( std::shared_ptr< ItemPtr > new_head )
+    {
+        TRACE_EVENT("Allocator", "AtomicList::append_item()");
+        std::shared_ptr< ItemPtr > old_head;
+
+        bool append_successful = false;
+        while( ! append_successful )
+        {
+            old_head = std::atomic_load( &head );
+            std::atomic_store( &new_head->prev, old_head );
+            append_successful = std::atomic_compare_exchange_strong( &head, &old_head, new_head );
+        }
+
+        return MutBackwardIterator{ old_head };
+    }
+
+
 };
 
 } // namespace memory
diff --git a/redGrapes/util/chunked_list.hpp b/redGrapes/util/chunked_list.hpp
index be8870b3..92d2a531 100644
--- a/redGrapes/util/chunked_list.hpp
+++ b/redGrapes/util/chunked_list.hpp
@@ -79,7 +79,7 @@ template <
 >
 struct ChunkedList
 {
-    using chunk_offset_t = uint16_t;
+    using chunk_offset_t = int16_t;
     using refcount_t = uint16_t;
 
     struct Item
@@ -170,7 +170,7 @@ struct ChunkedList
          * compensated by not increasing the count when the last
          * element is inserted.
          */
-        std::atomic< chunk_offset_t > item_count{ 1 };
+        std::atomic< chunk_offset_t > item_count{ 0 };
 
         /* lowest index with free slot that can
          * be used to add a new element
@@ -435,6 +435,15 @@ struct ChunkedList
         spdlog::error("copy construct ChunkedList!!");
     }
 
+    /* decrement item_count and in case all items of this chunk are deleted,
+     * and this chunk is not `head`, delete the chunk too
+     */
+    void release_chunk( typename memory::AtomicList< Chunk, Allocator >::MutBackwardIterator chunk )
+    {
+        if( chunk->item_count.fetch_sub(1) == 0 )
+            chunks.erase( chunk );
+    }
+
     MutBackwardIterator push( T const& item )
     {
         TRACE_EVENT("ChunkedList", "push");
@@ -444,20 +453,25 @@ struct ChunkedList
             auto chunk = chunks.rbegin();
             if( chunk != chunks.rend() )
             {
-                unsigned chunk_off = chunk->next_idx.fetch_add(1);
-
-                if( chunk_off < chunk_size )
-                {
-                    if( chunk_off+1 < chunk_size )
-                        chunk->item_count ++;
-
+                if( chunk->item_count.fetch_add(1) < chunk_size )
+                {
+                    unsigned chunk_off = chunk->next_idx.fetch_add(1);
+
+                    if( chunk_off < chunk_size )
+                    {
+                        chunk->items()[ chunk_off ] = item;
                     chunk->last_idx ++;
                     return MutBackwardIterator( chunk_size, chunk, chunk_off );
-                }
+                    }
+                }
+
+                release_chunk(chunk);
             }
 
-            chunks.allocate_item();
+            auto prev_chunk = chunks.allocate_item();
+            if( prev_chunk != chunks.rend() )
+                release_chunk( prev_chunk );
         }
     }
 
@@ -484,14 +498,7 @@ struct ChunkedList
              */
             pos.item().remove();
 
-            /* in case all items of this chunk are deleted,
-             * delete the chunk too
-             */
-            if( pos.chunk->item_count.fetch_sub(1) == 1 )
-            {
-                // spdlog::info("last item!!");
-                chunks.erase( pos.chunk );
-            }
+            release_chunk( pos.chunk );
         }
         else
             throw std::runtime_error("remove invalid position");
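
The reworked append_item() is a standard lock-free prepend on a shared_ptr head: load the current head, link the new node behind it, and retry a compare-exchange until no other thread has moved the head in between; the old head is then returned to the caller, which is what lets allocate_item() forward an iterator to the previous chunk. Below is a minimal standalone sketch of the same pattern, not part of redGrapes: StackList, Node and push_front are illustrative names, and it uses the same free-function std::atomic_* overloads for shared_ptr that the patch relies on (deprecated since C++20 in favour of std::atomic<std::shared_ptr<T>>, but still available).

#include <atomic>
#include <cassert>
#include <memory>

template< typename T >
struct StackList
{
    struct Node
    {
        T value;
        std::shared_ptr< Node > prev;   // link to the node that was head before us
    };

    std::shared_ptr< Node > head;

    // atomically prepend a new node and return the previous head,
    // mirroring the return value that append_item() now provides
    std::shared_ptr< Node > push_front( T value )
    {
        auto new_head = std::make_shared< Node >( Node{ std::move(value), nullptr } );
        std::shared_ptr< Node > old_head;

        bool success = false;
        while( ! success )
        {
            old_head = std::atomic_load( &head );
            // link behind the head we just observed ...
            std::atomic_store( &new_head->prev, old_head );
            // ... and publish, unless another thread replaced head meanwhile
            success = std::atomic_compare_exchange_strong( &head, &old_head, new_head );
        }
        return old_head;
    }
};

int main()
{
    StackList< int > list;
    auto prev = list.push_front( 1 );
    assert( prev == nullptr );              // first push saw an empty list
    prev = list.push_front( 2 );
    assert( prev && prev->value == 1 );     // second push returns the old head
}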
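
On the ChunkedList side, the patch switches push() to a reserve-then-write scheme: item_count.fetch_add(1) claims a slot before next_idx is advanced, a failed claim is handed back through release_chunk(), and the decrement that observes a previous count of zero is the one that erases the chunk, which is presumably why chunk_offset_t becomes signed so the counter may legitimately dip below zero. The following sketch mirrors only that accounting, under hypothetical names (FakeChunk, claim, release) that stand in for the real chunk type.

#include <atomic>
#include <cstdint>
#include <cstdio>

struct FakeChunk
{
    static constexpr int16_t capacity = 4;

    std::atomic< int16_t > item_count{ 0 };  // signed on purpose: a chunk that lost
                                             // the race can be decremented past zero
    std::atomic< int16_t > next_idx{ 0 };
    int slots[ capacity ];

    // try to reserve one slot and store `value`; returns the slot index,
    // or -1 if the chunk is full and the reservation was handed back
    int claim( int value )
    {
        if( item_count.fetch_add(1) < capacity )
        {
            int off = next_idx.fetch_add(1);
            if( off < capacity )
            {
                slots[ off ] = value;
                return off;
            }
        }
        release();          // over-subscribed: give the reservation back
        return -1;
    }

    // drop one reservation; returns true for the caller that saw the count
    // at zero, i.e. the one that would erase the chunk in the real code
    bool release()
    {
        return item_count.fetch_sub(1) == 0;
    }
};

int main()
{
    FakeChunk chunk;
    for( int i = 0; i < 6; ++i )
    {
        int off = chunk.claim( i * 10 );
        std::printf( "claim(%d) -> %s\n", i * 10,
                     off >= 0 ? "stored" : "full, a new chunk would be allocated" );
    }
}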