gnu: llama-cpp: Enable Vulkan.

* gnu/packages/patches/llama-cpp-vulkan-optional.patch: New file.
* gnu/local.mk (dist_patch_DATA): Add it.
* gnu/packages/machine-learning.scm (llama-cpp)
[source]: Add patch.
[arguments]<#:tests?>: Disable.
<#:configure-flags>: Add "-DGGML_VULKAN=ON".
<#:phases>[patch-paths]: New phase.
[inputs]: Add vulkan-headers, vulkan-loader.
[native-inputs]: Add shaderc, bash.

Change-Id: Ib7a58f5c7f622213f3aaf5abcd701b17eed80f6b
This commit is contained in:
Danny Milosavljevic 2025-01-29 01:22:33 +01:00
parent a6a4f773f3
commit 14b8039d94
No known key found for this signature in database
GPG key ID: E71A35542C30BAA5
2 changed files with 37 additions and 5 deletions

View file

@@ -595,12 +595,15 @@ Performance is achieved by using the LLVM JIT compiler.")
(commit tag)))
(file-name (git-file-name name tag))
(sha256
- (base32 "1xf2579q0r8nv06kj8padi6w9cv30w58vdys65nq8yzm3dy452a1"))))
+ (base32 "1xf2579q0r8nv06kj8padi6w9cv30w58vdys65nq8yzm3dy452a1"))
+ (patches
+  (search-patches "llama-cpp-vulkan-optional.patch"))))
(build-system cmake-build-system)
(arguments
(list
#:configure-flags
#~(list "-DBUILD_SHARED_LIBS=ON"
+ "-DGGML_VULKAN=ON"
"-DGGML_BLAS=ON"
"-DGGML_BLAS_VENDOR=OpenBLAS"
(string-append "-DBLAS_INCLUDE_DIRS="
@@ -625,10 +628,15 @@ Performance is achieved by using the LLVM JIT compiler.")
(guix build python-build-system))
#:phases
#~(modify-phases %standard-phases
+ (add-after 'unpack 'patch-paths
+   (lambda* (#:key inputs #:allow-other-keys)
+     (substitute* "ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp"
+       (("\"/bin/sh\"")
+        (string-append "\"" (search-input-file inputs "/bin/sh") "\"")))))
(add-after 'unpack 'disable-unrunable-tests
+ ;; test-eval-callback downloads ML model from network, cannot
+ ;; run in Guix build environment
(lambda _
- ;; test-eval-callback downloads ML model from network, cannot
- ;; run in Guix build environment
(substitute* '("examples/eval-callback/CMakeLists.txt")
(("COMMAND llama-eval-callback")
"COMMAND true llama-eval-callback"))))
@@ -658,8 +666,8 @@ Performance is achieved by using the LLVM JIT compiler.")
(string-append (assoc-ref outputs "out")
"/bin")
"^test-")))))))
- (inputs (list python))
- (native-inputs (list pkg-config))
+ (inputs (list python vulkan-headers vulkan-loader))
+ (native-inputs (list pkg-config shaderc bash))
(propagated-inputs
(list python-numpy python-pytorch python-sentencepiece openblas))
(properties '((tunable? . #true))) ;use AVX512, FMA, etc. when available

View file

@@ -0,0 +1,24 @@
Author: Danny Milosavljevic <dannym@friendly-machines.com>
Date: 2025-01-29
License: Expat
diff -ru orig/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp
--- orig/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp 2025-01-29 06:24:10.922476480 +0100
+++ llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp 2025-01-29 06:26:45.973357439 +0100
@@ -7317,9 +7317,13 @@
}
static int ggml_vk_get_device_count() {
- ggml_vk_instance_init();
-
- return vk_instance.device_indices.size();
+ try {
+ ggml_vk_instance_init();
+ return vk_instance.device_indices.size();
+ } catch (const vk::SystemError& e) {
+ std::cerr << "ggml_vulkan: Error: System error" << std::endl;
+ return 0;
+ }
}
static void ggml_vk_get_device_description(int device, char * description, size_t description_size) {