From 66e10c55bac9319274bf4094e920bbbc98fd4488 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gregor=20Dai=C3=9F?=
Date: Mon, 11 Mar 2024 12:31:46 +0100
Subject: [PATCH] Update action

---
 .github/workflows/documentation.yml | 2 +-
 README.md                           | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml
index 43f49173..e9ea7638 100644
--- a/.github/workflows/documentation.yml
+++ b/.github/workflows/documentation.yml
@@ -16,7 +16,7 @@ jobs:
     steps:
       # checkout repository
       - name: Checkout cppuddle
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           path: cppuddle
       # install dependencies
diff --git a/README.md b/README.md
index 2af91449..4140c325 100644
--- a/README.md
+++ b/README.md
@@ -18,6 +18,8 @@ In this use-case, allocating GPU buffers for all sub-grids in advance would have
 - Executor pools and various scheduling policies (round robin, priority queue, multi-gpu), which rely on reference counting to gauge the current load of a executor instead of querying the device itself. Tested with CUDA, HIP and Kokkos executors provided by HPX / HPX-Kokkos.
 - Special Executors/Allocators for on-the-fly work GPU aggregation (using HPX).
 
+The documentation is available [here](https://sc-sgs.github.io/CPPuddle/index.html). In particular, the public memory recycling functionality is available in the namespace [memory_recycling](https://sc-sgs.github.io/CPPuddle/namespacecppuddle_1_1memory__recycling.html), the executor pool functionality in the namespace [executor_recycling](https://sc-sgs.github.io/CPPuddle/namespacecppuddle_1_1executor__recycling.html), and the work aggregation (kernel fusion) functionality in the namespace [work_aggregation](https://sc-sgs.github.io/CPPuddle/namespacecppuddle_1_1kernel__aggregation.html).
+
 #### Requirements
 
 - C++17
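
For illustration only (not part of the patch): a minimal sketch of what using the memory recycling functionality referenced above might look like. It assumes CPPuddle provides a std::allocator-compatible recycling allocator in the cppuddle::memory_recycling namespace; the header path and the allocator name recycle_std used here are assumptions, not verified API.

// Minimal sketch: host buffers reused through a recycling allocator.
// The include path and the name `recycle_std` are assumptions based on
// the memory_recycling namespace documented above.
#include <vector>
#include <cppuddle/memory_recycling/std_recycling_allocators.hpp> // assumed header

int main() {
  using recycled_vector =
      std::vector<float, cppuddle::memory_recycling::recycle_std<float>>;

  for (int step = 0; step < 100; ++step) {
    // After the first iteration, this allocation is expected to be served
    // from CPPuddle's buffer pool instead of the system allocator.
    recycled_vector buffer(4096, 0.0f);
    // ... fill the buffer and launch work on it ...
  } // on destruction the buffer returns to the pool rather than being freed
  return 0;
}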