ChunkedList: store pointers to next/last elements instead of indices, to save the overhead of address calculation and allow more efficient packing of ItemAccess

* this also reintroduces the chunk size as a template parameter instead of storing it as a member
* initialize AtomicList with `chunk_capacity`, not allocation size
* AtomicList: construct Item/ItemControlBlock from `memory::Block`
* ChunkedList: construct Chunk from `memory::Block`
* reorder ChunkedList::Item struct for more efficient alignment
* encode `has_item` of ItemAccess into one 8-byte pointer using bitmagic
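
The last bullet describes pointer tagging: since an `Item*` is at least 2-byte aligned, its least-significant bit is always zero and can carry the `has_item` flag, so flag and pointer fit into a single 8-byte word. The ChunkedList diff itself is not among the file diffs loaded on this page, so the following is only a minimal sketch of the technique; all names in it are illustrative, not taken from the commit.

    #include <cassert>
    #include <cstdint>

    // Sketch: pack a boolean flag into the unused low bit of an aligned pointer.
    template < typename Item >
    struct TaggedItemPtr
    {
        uintptr_t bits;

        TaggedItemPtr( Item * ptr, bool has_item )
            : bits( reinterpret_cast< uintptr_t >( ptr ) | uintptr_t( has_item ) )
        {
            // alignof(Item) >= 2 guarantees the low bit of `ptr` is free
            assert( ( reinterpret_cast< uintptr_t >( ptr ) & 1u ) == 0 );
        }

        bool has_item() const { return bits & 1u; }

        Item * get() const
        {
            // mask off the flag bit to recover the original pointer
            return reinterpret_cast< Item * >( bits & ~uintptr_t( 1 ) );
        }
    };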
michaelsippel committed Dec 12, 2023
1 parent 5f71af6 commit a0d1479
Showing 8 changed files with 172 additions and 188 deletions.
5 changes: 1 addition & 4 deletions redGrapes/resource/resource.cpp

@@ -22,10 +22,7 @@ unsigned int ResourceBase::generateID()
 ResourceBase::ResourceBase()
     : id( generateID() )
     , scope_level( scope_depth() )
-    , users(
-        memory::Allocator( get_arena_id() ),
-        REDGRAPES_RUL_CHUNKSIZE
-    )
+    , users( memory::Allocator( get_arena_id() ) )
 {}

 unsigned ResourceBase::get_arena_id() const {
2 changes: 1 addition & 1 deletion redGrapes/resource/resource.hpp

@@ -50,7 +50,7 @@ class ResourceBase
     unsigned int scope_level;

     SpinLock users_mutex;
-    ChunkedList< Task* > users;
+    ChunkedList< Task*, REDGRAPES_RUL_CHUNKSIZE > users;

     /**
      * Create a new resource with an unused ID.
6 changes: 4 additions & 2 deletions redGrapes/resource/resource_user.cpp

@@ -15,8 +15,8 @@ namespace redGrapes

 ResourceUser::ResourceUser()
     : scope_level( SingletonContext::get().scope_depth() )
-    , access_list( 64 )
-    , unique_resources( 64 )
+    , access_list( memory::Allocator() )
+    , unique_resources( memory::Allocator() )
 {
 }

@@ -29,6 +29,8 @@ namespace redGrapes

 ResourceUser::ResourceUser( std::initializer_list< ResourceAccess > list )
     : scope_level( scope_depth() )
+    , access_list( memory::Allocator() )
+    , unique_resources( memory::Allocator() )
 {
     for( auto & ra : list )
         add_resource_access(ra);
8 changes: 4 additions & 4 deletions redGrapes/resource/resource_user.hpp

@@ -18,7 +18,7 @@
 #include <redGrapes/util/chunked_list.hpp>
 #include <redGrapes/util/trace.hpp>

-//#include <redGrapes/resource/resource.hpp>
+#include <redGrapes/resource/resource.hpp>

 namespace redGrapes
 {
@@ -32,7 +32,7 @@ struct ResourceAccess;
 struct ResourceUsageEntry
 {
     std::shared_ptr< ResourceBase > resource;
-    typename ChunkedList< Task* >::MutBackwardIterator task_entry;
+    typename ChunkedList< Task*, REDGRAPES_RUL_CHUNKSIZE >::MutBackwardIterator task_entry;

     bool operator==( ResourceUsageEntry const & other ) const;
 };
@@ -54,8 +54,8 @@ class ResourceUser

     uint8_t scope_level;

-    ChunkedList<ResourceAccess> access_list;
-    ChunkedList<ResourceUsageEntry> unique_resources;
+    ChunkedList<ResourceAccess, 8> access_list;
+    ChunkedList<ResourceUsageEntry, 8> unique_resources;
 }; // class ResourceUser

 } // namespace redGrapes
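
These hunks show the first bullet of the commit message in action: the chunk size moves out of the constructor call (`access_list( 64 )`) and into the type itself (`ChunkedList< ResourceAccess, 8 >`), leaving the constructor with only the allocator. A minimal sketch of this pattern, assuming nothing about the real `ChunkedList` beyond what the diff shows (names and defaults here are illustrative):

    #include <array>
    #include <cstddef>

    // Sketch: baking the capacity into the type lets the compiler fold the
    // chunk size into address calculations instead of loading a member.
    template < typename T, size_t T_chunk_size >
    struct ChunkedListSketch
    {
        struct Chunk
        {
            std::array< T, T_chunk_size > items;
            Chunk * prev = nullptr;
        };

        static constexpr size_t chunk_size = T_chunk_size;
    };

    // usage mirroring the diff above:
    // ChunkedListSketch< int, 8 > small_list;
    // ChunkedListSketch< int, 64 > large_list;

Different capacities now yield distinct types, so each use site pays for exactly the chunk size it declares.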
6 changes: 4 additions & 2 deletions redGrapes/scheduler/event.cpp

@@ -28,19 +28,21 @@ namespace scheduler
 Event::Event()
     : state(1)
     , waker_id(-1)
-    , followers( REDGRAPES_EVENT_FOLLOWER_LIST_CHUNKSIZE )
+    , followers( memory::Allocator() )
 {
 }

 Event::Event(Event & other)
     : state((uint16_t)other.state)
-    , waker_id(other.waker_id)
+    , waker_id( other.waker_id )
+    , followers( memory::Allocator() )
 {
 }

 Event::Event(Event && other)
     : state((uint16_t)other.state)
     , waker_id(other.waker_id)
+    , followers( memory::Allocator() )
 {
 }
2 changes: 1 addition & 1 deletion redGrapes/scheduler/event.hpp

@@ -98,7 +98,7 @@ struct Event
     WakerId waker_id;

     //! the set of subsequent events
-    ChunkedList< EventPtr > followers;
+    ChunkedList< EventPtr, REDGRAPES_EVENT_FOLLOWER_LIST_CHUNKSIZE > followers;

     Event();
     Event(Event &);
96 changes: 46 additions & 50 deletions redGrapes/util/atomic_list.hpp

@@ -17,6 +17,8 @@
 #include <hwloc.h>
 #include <spdlog/spdlog.h>

+#include <redGrapes/memory/block.hpp>
+
 namespace redGrapes
 {
 namespace memory
@@ -42,22 +44,26 @@ template <
 struct AtomicList
 {
 //private:
-    struct ItemPtr
+    struct ItemControlBlock
     {
         bool volatile deleted;
-        std::shared_ptr< ItemPtr > prev;
-        Item * item_data;
-
-        template < typename... Args >
-        ItemPtr( Item * item_data, Args&&... args )
-            : deleted(false)
-            , prev(nullptr)
-            , item_data(item_data)
+        std::shared_ptr< ItemControlBlock > prev;
+        uintptr_t item_data_ptr;
+
+        ItemControlBlock( memory::Block blk )
+            : item_data_ptr( blk.ptr )
         {
-            new ( get() ) Item ( std::forward<Args>(args)... );
+            /* put Item at front and initialize it
+             * with the remaining memory region
+             */
+            blk.ptr += sizeof(Item);
+            blk.len -= sizeof(Item);
+
+            spdlog::info("init ItemControlBlock with {}", blk.ptr);
+            new ( get() ) Item ( blk );
         }

-        ~ItemPtr()
+        ~ItemControlBlock()
         {
             get()->~Item();
         }
@@ -75,7 +81,7 @@
          */
        void skip_deleted_prev()
        {
-            std::shared_ptr<ItemPtr> p = std::atomic_load( &prev );
+            std::shared_ptr<ItemControlBlock> p = std::atomic_load( &prev );
            while( p && p->deleted )
                p = std::atomic_load( &p->prev );

@@ -84,13 +90,13 @@

        Item * get() const
        {
-            return item_data;
+            return (Item*)item_data_ptr;
        }
    };

    Allocator alloc;
-    std::shared_ptr< ItemPtr > head;
-    size_t const chunk_size;
+    std::shared_ptr< ItemControlBlock > head;
+    size_t const chunk_capacity;

    /* keeps a single, predefined pointer
     * and frees it on deallocate.
@@ -126,35 +132,30 @@
        }
    };

-
-
 public:

-    AtomicList( Allocator && alloc, size_t chunk_size )
+    AtomicList( Allocator && alloc, size_t chunk_capacity )
        : alloc( alloc )
        , head( nullptr )
-        , chunk_size( chunk_size )
+        , chunk_capacity( chunk_capacity )
    {
-#ifndef NDEBUG
-        if( chunk_size <= get_controlblock_size() )
-            spdlog::error("chunksize = {}, control block ={}", chunk_size, get_controlblock_size());
-#endif
-
-        assert( chunk_size > get_controlblock_size() );
    }

    static constexpr size_t get_controlblock_size()
    {
        /* TODO: use sizeof( ...shared_ptr_inplace_something... )
         */
        size_t const shared_ptr_size = 512;

-        return sizeof(ItemPtr) + shared_ptr_size;
+        return sizeof(ItemControlBlock) + shared_ptr_size;
    }

    constexpr size_t get_chunk_capacity()
    {
-        return chunk_size - get_controlblock_size();
+        return chunk_capacity;
    }

+    constexpr size_t get_chunk_allocsize()
+    {
+        return chunk_capacity + get_controlblock_size();
+    }
+
    /* initializes a new chunk
@@ -168,30 +169,25 @@
         * - shared_ptr control block
         * - chunk control block
         * - chunk data
-         * whereby chunk data is not included by sizeof(Chunk),
+         * whereby chunk data is not included by sizeof(ItemControlBlock),
         * but reserved by StaticAlloc.
         * This works because shared_ptr control block lies at lower address.
         */
-        StaticAlloc<void> chunk_alloc( this->alloc, chunk_size );
-        uintptr_t base = (uintptr_t)chunk_alloc.ptr;
-        return append_item(
-            std::allocate_shared< ItemPtr >(
-                chunk_alloc,
-
-                /* TODO: generalize this constructor call,
-                 * specialized for `memory chunks` now
-                 */
-                (Item*) (base + get_controlblock_size()),
-                base + get_controlblock_size() + sizeof(Item),
-                base + chunk_size
-            )
-        );
+        StaticAlloc<void> chunk_alloc( this->alloc, get_chunk_allocsize() );
+
+        // this block will contain the Item-data of ItemControlBlock
+        memory::Block blk{
+            .ptr = (uintptr_t)chunk_alloc.ptr + get_controlblock_size(),
+            .len = chunk_capacity - get_controlblock_size()
+        };
+
+        return append_item( std::allocate_shared< ItemControlBlock >( chunk_alloc, blk ) );
    }

    template < bool is_const = false >
    struct BackwardIterator
    {
-        std::shared_ptr< ItemPtr > c;
+        std::shared_ptr< ItemControlBlock > c;

        void erase()
        {
@@ -259,7 +255,7 @@

    MutBackwardIterator rend() const
    {
-        return MutBackwardIterator{ std::shared_ptr<ItemPtr>() };
+        return MutBackwardIterator{ std::shared_ptr<ItemControlBlock>() };
    }

    ConstBackwardIterator crbegin() const
@@ -269,7 +265,7 @@

    ConstBackwardIterator crend() const
    {
-        return ConstBackwardIterator{ std::shared_ptr<ItemPtr>() };
+        return ConstBackwardIterator{ std::shared_ptr<ItemControlBlock>() };
    }

    /* Flags chunk at `pos` as erased. Actual removal is delayed until
@@ -287,17 +283,17 @@
     * and returns the previous head to which the new_head
     * is now linked.
     */
-    auto append_item( std::shared_ptr< ItemPtr > new_head )
+    auto append_item( std::shared_ptr< ItemControlBlock > new_head )
    {
        TRACE_EVENT("Allocator", "AtomicList::append_item()");
-        std::shared_ptr< ItemPtr > old_head;
+        std::shared_ptr< ItemControlBlock > old_head;

        bool append_successful = false;
        while( ! append_successful )
        {
            old_head = std::atomic_load( &head );
            std::atomic_store( &new_head->prev, old_head );
-            append_successful = std::atomic_compare_exchange_strong<ItemPtr>( &head, &old_head, new_head );
+            append_successful = std::atomic_compare_exchange_strong<ItemControlBlock>( &head, &old_head, new_head );
        }

        return MutBackwardIterator{ old_head };
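
The `append_item` hunk is a standard lock-free list push: link the new head to the current head, then publish it with a compare-and-swap, retrying if another thread got there first. A condensed, self-contained sketch of the same idiom, using the same pre-C++20 `std::atomic_*` free functions on `shared_ptr` as the diff (the `AtomicStack` and `push` names are illustrative, not from the commit):

    #include <atomic>
    #include <memory>

    template < typename Node >
    struct AtomicStack
    {
        std::shared_ptr< Node > head;

        // prepend new_head and return the previous head;
        // Node is assumed to have a `std::shared_ptr< Node > prev` member
        std::shared_ptr< Node > push( std::shared_ptr< Node > new_head )
        {
            std::shared_ptr< Node > old_head = std::atomic_load( &head );
            do
            {
                // on CAS failure, old_head is reloaded with the current head
                std::atomic_store( &new_head->prev, old_head );
            }
            while( ! std::atomic_compare_exchange_weak( &head, &old_head, new_head ) );

            return old_head;
        }
    };

Because appends only ever touch `head` and the new node's `prev` pointer, readers can keep traversing backwards through the old chain without synchronization.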
(The diff of the eighth changed file is not loaded on this page.)
