Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[proxy](5/n) use c headers instead of c++ headers #720

Open
wants to merge 5 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
49 changes: 49 additions & 0 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -504,6 +504,55 @@ jobs:
- name: Test
run: ${{github.workspace}}/build/fuzzing/snmalloc-fuzzer

self-vendored:
name: Self Vendored STL Functionality
strategy:
fail-fast: false
matrix:
include:
- os: windows-2022
cxx: clang-cl
cc: clang-cl
- os: ubuntu-24.04
cxx: clang++-18
cc: clang-18
- os: ubuntu-24.04
cxx: g++-14
cc: gcc-14
- os: macos-latest
cxx: clang++
cc: clang
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- name: Prepare Windows
if: runner.os == 'Windows'
run: |
choco install ninja
- name: Prepare macOS
if: runner.os == 'macOS'
run: |
brew install ninja
- name: Prepare Ubuntu
if: runner.os == 'Linux'
run: |
sudo apt-get install -y ninja-build
- name: Configure CMake
run: >
cmake
-B ${{github.workspace}}/build
-DSNMALLOC_USE_SELF_VENDORED_STL=ON
-GNinja
-DCMAKE_BUILD_TYPE=RelWithDebInfo
-DCMAKE_CXX_COMPILER=${{ matrix.cxx }}
-DCMAKE_C_COMPILER=${{ matrix.cc }}
- name: Build
run: cmake --build ${{github.workspace}}/build --parallel
- name: Test
run: |
cd ${{github.workspace}}/build
ctest --parallel

all-checks:
# Currently FreeBSD and NetBSD CI are not working, so we do not require them to pass.
# Add fuzzing back when the memmove issue is fixed.
Expand Down
7 changes: 7 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ option(SNMALLOC_BENCHMARK_INDIVIDUAL_MITIGATIONS "Build tests and ld_preload for
option(SNMALLOC_ENABLE_DYNAMIC_LOADING "Build such that snmalloc can be dynamically loaded. This is not required for LD_PRELOAD, and will harm performance if enabled." OFF)
option(SNMALLOC_ENABLE_WAIT_ON_ADDRESS "Use wait on address backoff strategy if it is available" ON)
option(SNMALLOC_ENABLE_FUZZING "Enable fuzzing instrumentation tests" OFF)
option(SNMALLOC_USE_SELF_VENDORED_STL "Avoid using system STL" OFF)
# Options that apply only if we're not building the header-only library
cmake_dependent_option(SNMALLOC_RUST_SUPPORT "Build static library for rust" OFF "NOT SNMALLOC_HEADER_ONLY_LIBRARY" OFF)
cmake_dependent_option(SNMALLOC_STATIC_LIBRARY "Build static libraries" ON "NOT SNMALLOC_HEADER_ONLY_LIBRARY" OFF)
Expand Down Expand Up @@ -205,6 +206,12 @@ else()
target_compile_definitions(snmalloc INTERFACE SNMALLOC_USE_WAIT_ON_ADDRESS=0)
endif()

if(SNMALLOC_USE_SELF_VENDORED_STL)
target_compile_definitions(snmalloc INTERFACE SNMALLOC_USE_SELF_VENDORED_STL=1)
else()
target_compile_definitions(snmalloc INTERFACE SNMALLOC_USE_SELF_VENDORED_STL=0)
endif()

# https://learn.microsoft.com/en-us/cpp/build/reference/zc-cplusplus
if(MSVC)
target_compile_options(snmalloc INTERFACE "/Zc:__cplusplus")
Expand Down
48 changes: 24 additions & 24 deletions docs/combininglock.md
Original file line number Diff line number Diff line change
Expand Up @@ -94,14 +94,14 @@ Let us provide an MCS queue lock for our API given above:
```C++
struct LockNode
{
std::atomic<LockNode*> next{nullptr};
std::atomic<bool> available{false};
proxy::Atomic<LockNode*> next{nullptr};
proxy::Atomic<bool> available{false};
};

struct MCSLock
{
// End of queue
std::atomic<LockNode*> last;
proxy::Atomic<LockNode*> last;
};

template<typename F>
Expand All @@ -112,13 +112,13 @@ inline void with(MCSLock& lock, F&& f)

// **************ACQUIRE**************
// Add ourselves to the end of the queue.
LockNode* prev = lock.last.exchange(&node, std::memory_order_acq_rel);
LockNode* prev = lock.last.exchange(&node, proxy::memory_order_acq_rel);
if (prev != nullptr)
{
// Add link to previous element in the queue
prev->next.store(&node, std::memory_order_release);
prev->next.store(&node, proxy::memory_order_release);
// Wait for our turn.
while (!node.available.load(std::memory_order_acquire))
while (!node.available.load(proxy::memory_order_acquire))
;
}

Expand All @@ -128,19 +128,19 @@ inline void with(MCSLock& lock, F&& f)

// **************RELEASE**************
// Check if there is a next thread.
if (node.next.load(std::memory_order_acquire) == nullptr)
if (node.next.load(proxy::memory_order_acquire) == nullptr)
{
auto node_address = &node;
// No next thread so remove ourselves from the end of the queue
if (lock.last.compare_exchange_strong(node_address, nullptr, std::memory_order_acq_rel))
if (lock.last.compare_exchange_strong(node_address, nullptr, proxy::memory_order_acq_rel))
return;
// Wait for next thread to be set as we failed to remove ourselves from the end of the queue.
while (node.next.load(std::memory_order_acquire) == nullptr)
while (node.next.load(proxy::memory_order_acquire) == nullptr)
;
}

// Wake next thread.
node.next.load(std::memory_order_acquire)->available.store(true, std::memory_order_release);
node.next.load(proxy::memory_order_acquire)->available.store(true, proxy::memory_order_release);
}
```
The code can be broken into three parts:
Expand Down Expand Up @@ -187,8 +187,8 @@ This represents the operation that the must be executed for this thread.
```C++
struct CombiningLockNode
{
std::atomic<CombiningLockNode*> next{nullptr};
std::atomic<LockStatus> status{WAITING};
proxy::Atomic<CombiningLockNode*> next{nullptr};
proxy::Atomic<LockStatus> status{WAITING};
void (*f_raw)(CombiningLockNode*);

void run()
Expand Down Expand Up @@ -226,19 +226,19 @@ inline void with(CombiningLock& lock, F&& f)

// **************ACQUIRE**************
// Add ourselves to the end of the queue.
CombiningLockNode* prev = lock.last.exchange(&node, std::memory_order_acq_rel);
CombiningLockNode* prev = lock.last.exchange(&node, proxy::memory_order_acq_rel);

if (prev != nullptr)
{
// Add link to previous element in the queue
prev->next.store(&node, std::memory_order_release);
prev->next.store(&node, proxy::memory_order_release);

// Wait for our turn.
while (node.status.load(std::memory_order_relaxed) == LockStatus::WAITING)
while (node.status.load(proxy::memory_order_relaxed) == LockStatus::WAITING)
;

// Check if another thread completed our work.
if (node.status.load(std::memory_order_acquire) == LockStatus::DONE)
if (node.status.load(proxy::memory_order_acquire) == LockStatus::DONE)
return;
}

Expand All @@ -251,37 +251,37 @@ inline void with(CombiningLock& lock, F&& f)
curr->run();

// Check if there is another operation to execute
auto next = curr->next.load(std::memory_order_acquire);
auto next = curr->next.load(proxy::memory_order_acquire);
if (next == nullptr)
break;

// Notify thread that we completed its work.
curr->status.store(LockStatus::DONE, std::memory_order_release);
curr->status.store(LockStatus::DONE, proxy::memory_order_release);
curr = next;
}

// ***********RELEASE**************
// Attempt to close the queue
auto curr_address = curr;
if (lock.last.compare_exchange_strong(curr_address, nullptr, std::memory_order_acq_rel))
if (lock.last.compare_exchange_strong(curr_address, nullptr, proxy::memory_order_acq_rel))
{
curr->status.store(LockStatus::DONE, std::memory_order_release);
curr->status.store(LockStatus::DONE, proxy::memory_order_release);
return;
}

// Wait for next thread to be set as we failed to remove
// ourselves from the end of the queue.
while (curr->next.load(std::memory_order_relaxed) == nullptr)
while (curr->next.load(proxy::memory_order_relaxed) == nullptr)
;

// Read the next thread
auto next = curr->next.load(std::memory_order_acquire);
auto next = curr->next.load(proxy::memory_order_acquire);

// Notify the current thread that its work has been completed.
curr->status.store(LockStatus::DONE, std::memory_order_release);
curr->status.store(LockStatus::DONE, proxy::memory_order_release);
// Notify the next thread it is the head of the queue, and should
// perform operations from the queue.
next->status.store(LockStatus::HEAD, std::memory_order_release);
next->status.store(LockStatus::HEAD, proxy::memory_order_release);
return;
}
```
Expand Down
2 changes: 1 addition & 1 deletion docs/release/0.7/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ This allows developers to build new security features on top of snmalloc.
For instance, building snmalloc with the following definition of `Alloc` will allow you to store a 64-bit counter for each allocation:
```cpp
using Alloc = snmalloc::LocalAllocator<snmalloc::StandardConfigClientMeta<
ArrayClientMetaDataProvider<std::atomic<size_t>>>>;
ArrayClientMetaDataProvider<proxy::Atomic<size_t>>>>;
```

This does not affect the underlying alignment of the allocations.
Expand Down
8 changes: 4 additions & 4 deletions src/snmalloc/aal/aal.h
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
# define SNMALLOC_TICK_USE_CLOCK_GETTIME
# endif
#endif
#include <cstdint>
#include <stdint.h>
#include <utility>

#ifndef SNMALLOC_TICK_USE_CLOCK_GETTIME
Expand Down Expand Up @@ -69,10 +69,10 @@ namespace snmalloc
* must explicitly give their address_t.
*
* This somewhat obtuse way of spelling the defaulting is necessary so
* that all arguments to std::conditional_t are valid, even if they
* that all arguments to proxy::conditional_t are valid, even if they
* wouldn't be valid in context. One might rather wish to say
*
* std::conditional_t<..., uintptr_t, Arch::address_t>
* proxy::conditional_t<..., uintptr_t, Arch::address_t>
*
* but that requires that Arch::address_t always be given, precisely
* the thing we're trying to avoid with the conditional.
Expand All @@ -83,7 +83,7 @@ namespace snmalloc
using address_t = uintptr_t;
};

using address_t = typename std::conditional_t<
using address_t = typename proxy::conditional_t<
(Arch::aal_features & IntegerPointers) != 0,
default_address_t,
Arch>::address_t;
Expand Down
2 changes: 1 addition & 1 deletion src/snmalloc/aal/aal_arm.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
# endif
#endif

#include <cstddef>
#include <stddef.h>

namespace snmalloc
{
Expand Down
11 changes: 5 additions & 6 deletions src/snmalloc/aal/aal_concept.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,7 @@
# include "../ds_core/ds_core.h"
# include "aal_consts.h"

# include <cstdint>
# include <utility>
# include <stdint.h>

namespace snmalloc
{
Expand All @@ -16,10 +15,10 @@ namespace snmalloc
template<typename AAL>
concept IsAAL_static_members =
requires() {
typename std::integral_constant<uint64_t, AAL::aal_features>;
typename std::integral_constant<int, AAL::aal_name>;
typename std::integral_constant<std::size_t, AAL::bits>;
typename std::integral_constant<std::size_t, AAL::address_bits>;
typename proxy::integral_constant<uint64_t, AAL::aal_features>;
typename proxy::integral_constant<int, AAL::aal_name>;
typename proxy::integral_constant<size_t, AAL::bits>;
typename proxy::integral_constant<size_t, AAL::address_bits>;
};

/**
Expand Down
2 changes: 1 addition & 1 deletion src/snmalloc/aal/aal_consts.h
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
#pragma once
#include <cstdint>
#include <stdint.h>

namespace snmalloc
{
Expand Down
2 changes: 1 addition & 1 deletion src/snmalloc/aal/address.h
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#pragma once
#include "../ds_core/ds_core.h"

#include <cstdint>
#include <stdint.h>

namespace snmalloc
{
Expand Down
3 changes: 2 additions & 1 deletion src/snmalloc/backend/fixedglobalconfig.h
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,8 @@ namespace snmalloc
static_assert(B::wildness == capptr::dimension::Wildness::Wild);

static const size_t sz = sizeof(
std::conditional<std::is_same_v<std::remove_cv<T>, void>, void*, T>);
std::
conditional<proxy::is_same_v<proxy::remove_cv<T>, void>, void*, T>);

UNUSED(ls);
auto address = address_cast(p);
Expand Down
8 changes: 4 additions & 4 deletions src/snmalloc/backend/globalconfig.h
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ namespace snmalloc
/**
* Use one of the default range configurations
*/
using LocalState = std::conditional_t<
using LocalState = proxy::conditional_t<
mitigations(metadata_protection),
MetaProtectedRangeLocalState<Pal, Pagemap, Base>,
StandardLocalState<Pal, Pagemap, Base>>;
Expand All @@ -84,7 +84,7 @@ namespace snmalloc
* Specifies if the Configuration has been initialised.
*/
SNMALLOC_REQUIRE_CONSTINIT
inline static std::atomic<bool> initialised{false};
inline static proxy::Atomic<bool> initialised{false};

/**
* Used to prevent two threads attempting to initialise the configuration
Expand Down Expand Up @@ -126,7 +126,7 @@ namespace snmalloc
Authmap::init();
}

initialised.store(true, std::memory_order_release);
initialised.store(true, proxy::memory_order_release);
});
}

Expand All @@ -146,7 +146,7 @@ namespace snmalloc
// and concurrency safe.
SNMALLOC_FAST_PATH static void ensure_init()
{
if (SNMALLOC_LIKELY(initialised.load(std::memory_order_acquire)))
if (SNMALLOC_LIKELY(initialised.load(proxy::memory_order_acquire)))
return;

ensure_init_slow();
Expand Down
2 changes: 1 addition & 1 deletion src/snmalloc/backend/meta_protected_range.h
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ namespace snmalloc
CommitRange<PAL>,
// In case of huge pages, we don't want to give each thread its own huge
// page, so commit in the global range.
std::conditional_t<
proxy::conditional_t<
(max_page_chunk_size_bits > MIN_CHUNK_BITS),
LargeBuddyRange<
max_page_chunk_size_bits,
Expand Down
4 changes: 2 additions & 2 deletions src/snmalloc/backend_helpers/authmap.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ namespace snmalloc
struct BasicAuthmap
{
static_assert(
std::is_same_v<capptr::Arena<void>, typename ConcreteMap::EntryType>,
proxy::is_same_v<capptr::Arena<void>, typename ConcreteMap::EntryType>,
"BasicAuthmap's ConcreteMap must have capptr::Arena<void> element type!");

private:
Expand Down Expand Up @@ -70,7 +70,7 @@ namespace snmalloc
* Pick between the two above implementations based on StrictProvenance
*/
template<typename CA>
using DefaultAuthmap = std::conditional_t<
using DefaultAuthmap = proxy::conditional_t<
aal_supports<StrictProvenance>,
BasicAuthmap<CA>,
DummyAuthmap>;
Expand Down
4 changes: 2 additions & 2 deletions src/snmalloc/backend_helpers/largebuddyrange.h
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,7 @@ namespace snmalloc
* covers the whole range. Uses template insanity to make this work.
*/
template<bool exists = MAX_SIZE_BITS != (bits::BITS - 1)>
std::enable_if_t<exists>
proxy::enable_if_t<exists>
parent_dealloc_range(capptr::Arena<void> base, size_t size)
{
static_assert(
Expand Down Expand Up @@ -343,7 +343,7 @@ namespace snmalloc
/* The large buddy allocator always deals in Arena-bounded pointers. */
using ChunkBounds = capptr::bounds::Arena;
static_assert(
std::is_same_v<typename ParentRange::ChunkBounds, ChunkBounds>);
proxy::is_same_v<typename ParentRange::ChunkBounds, ChunkBounds>);

constexpr Type() = default;

Expand Down
Loading
Loading