diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index 2fdb9fa40274b..548661b9bb636 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -2400,6 +2400,7 @@ extern "C" {
     GGML_API int ggml_cpu_has_vsx        (void);
     GGML_API int ggml_cpu_has_matmul_int8(void);
     GGML_API int ggml_cpu_has_cann       (void);
+    GGML_API int ggml_cpu_has_llamafile  (void);
 
     //
     // Internal types and functions exposed for tests and benchmarks
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index dbb3a3ebe1cca..f65837e856ac3 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -22005,6 +22005,14 @@ int ggml_cpu_has_cann(void) {
 #endif
 }
 
+int ggml_cpu_has_llamafile(void) {
+#if defined(GGML_USE_LLAMAFILE)
+    return 1;
+#else
+    return 0;
+#endif
+}
+
 int ggml_cpu_has_gpublas(void) {
     return ggml_cpu_has_cuda() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() || ggml_cpu_has_sycl();
 }
diff --git a/src/llama.cpp b/src/llama.cpp
index 9e502018dfb76..80235ae19b270 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -19146,11 +19146,7 @@ const char * llama_print_system_info(void) {
     s += "SSSE3 = "       + std::to_string(ggml_cpu_has_ssse3())       + " | ";
     s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";
     s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
-#ifdef GGML_USE_LLAMAFILE
-    s += "LLAMAFILE = 1 | ";
-#else
-    s += "LLAMAFILE = 0 | ";
-#endif
+    s += "LLAMAFILE = "   + std::to_string(ggml_cpu_has_llamafile())   + " | ";
 
     return s.c_str();
 }