diff --git a/CMakeLists.txt b/CMakeLists.txt
index f4068c3..76fa4d5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -10,7 +10,7 @@ project(nvidia-pstated C)
 # Find the CUDAToolkit package
 find_package(CUDAToolkit REQUIRED COMPONENTS nvml)
 
-# Declare the nvapi package
+# Declare the NVAPI package
 FetchContent_Declare(
   nvapi
@@ -18,7 +18,7 @@ FetchContent_Declare(
   URL_HASH SHA256=71339c274a6a633f19b6bd358c7f3045063c6bc106b7dc488aaa7360a6d2b9d7
 )
 
-# Download and make the nvapi content available for use
+# Download and make the NVAPI content available for use
 FetchContent_MakeAvailable(nvapi)
 
 # Define the executable target
diff --git a/README.md b/README.md
index af8ebd4..aaef383 100644
--- a/README.md
+++ b/README.md
@@ -8,13 +8,24 @@ A daemon that automatically manages the performance states of NVIDIA GPUs.
 
 #### Linux
 
-Make sure you have the proprietary NVIDIA driver and the packages providing `libnvidia-api.so.1` and `libnvidia-ml.so.1` installed.
+Make sure the proprietary NVIDIA driver is installed.
+
+You will need the following libraries:
+
+- `libnvidia-api.so.1`
+- `libnvidia-ml.so.1`
+
+Packages that provide these libraries:
 
 - ArchLinux: `nvidia-utils`
 - Debian: `libnvidia-api1` or `libnvidia-tesla-api1` (depending on the GPU and driver installed)
 
 On Debian derivatives, you can use `apt search libnvidia-api.so.1` and `apt search libnvidia-ml.so.1` to find the package you need.
 
+Note that you MUST run this daemon at the host level, i.e. where the CUDA Driver is available. You can NOT run this daemon in a container.
+
+![nvidia-container-stack](https://cloud.githubusercontent.com/assets/3028125/12213714/5b208976-b632-11e5-8406-38d379ec46aa.png)
+
 #### Windows
 
 Make sure the NVIDIA driver is installed.
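
For readers unfamiliar with the pattern the `CMakeLists.txt` comments above refer to, the following is a minimal, self-contained sketch of how CMake's `FetchContent_Declare` / `FetchContent_MakeAvailable` pair is typically used to pull in an archive such as the NVAPI SDK. The project name, target name, URL, and hash below are placeholders for illustration only, not the values used by nvidia-pstated.

```cmake
cmake_minimum_required(VERSION 3.25)
project(fetchcontent-demo C)

# FetchContent ships with CMake itself
include(FetchContent)

# Declare where the archive comes from and how to verify it.
# The URL and SHA256 below are placeholders, not the real NVAPI values.
FetchContent_Declare(
  nvapi
  URL      https://example.com/nvapi-sdk.tar
  URL_HASH SHA256=0000000000000000000000000000000000000000000000000000000000000000
)

# Download the archive on first configure and make its content available;
# this also defines nvapi_SOURCE_DIR pointing at the extracted files.
FetchContent_MakeAvailable(nvapi)

# A hypothetical executable that consumes headers from the fetched SDK
add_executable(demo main.c)
target_include_directories(demo PRIVATE ${nvapi_SOURCE_DIR})
```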