gnu: llama-cpp: Enable Vulkan.
* gnu/packages/patches/llama-cpp-vulkan-optional.patch: New file.
* gnu/local.mk (dist_patch_DATA): Add it.
* gnu/packages/machine-learning.scm (llama-cpp)[source]: Add patch.
[arguments]<#:tests?>: Disable.
<#:configure-flags>: Add "-DGGML_VULKAN=ON".
<#:phases>[patch-paths]: New phase.
[inputs]: Add vulkan-headers, vulkan-loader.
[native-inputs]: Add shaderc, bash.

Change-Id: Ib7a58f5c7f622213f3aaf5abcd701b17eed80f6b
parent a6a4f773f3
commit 14b8039d94

2 changed files with 37 additions and 5 deletions
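In short: "-DGGML_VULKAN=ON" compiles the Vulkan backend in unconditionally; the new 'patch-paths phase fixes a hardcoded "/bin/sh" in the build-time shader generator, which would otherwise fail inside Guix's isolated build container; and the bundled patch makes Vulkan initialisation fail soft at run time, so the package still works on machines without a Vulkan driver. Illustrative sketches of the latter two points follow the relevant hunks below.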
gnu/packages/machine-learning.scm (13 additions, 5 deletions)

@@ -595,12 +595,15 @@ Performance is achieved by using the LLVM JIT compiler.")
              (commit tag)))
        (file-name (git-file-name name tag))
        (sha256
-        (base32 "1xf2579q0r8nv06kj8padi6w9cv30w58vdys65nq8yzm3dy452a1"))))
+        (base32 "1xf2579q0r8nv06kj8padi6w9cv30w58vdys65nq8yzm3dy452a1"))
+       (patches
+        (search-patches "llama-cpp-vulkan-optional.patch"))))
     (build-system cmake-build-system)
     (arguments
      (list
       #:configure-flags
       #~(list "-DBUILD_SHARED_LIBS=ON"
+              "-DGGML_VULKAN=ON"
               "-DGGML_BLAS=ON"
               "-DGGML_BLAS_VENDOR=OpenBLAS"
               (string-append "-DBLAS_INCLUDE_DIRS="
@@ -625,10 +628,15 @@ Performance is achieved by using the LLVM JIT compiler.")
                   (guix build python-build-system))
       #:phases
       #~(modify-phases %standard-phases
+          (add-after 'unpack 'patch-paths
+            (lambda* (#:key inputs #:allow-other-keys)
+              (substitute* "ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp"
+                (("\"/bin/sh\"")
+                 (string-append "\"" (search-input-file inputs "/bin/sh") "\"")))))
           (add-after 'unpack 'disable-unrunable-tests
-            ;; test-eval-callback downloads ML model from network, cannot
-            ;; run in Guix build environment
            (lambda _
+             ;; test-eval-callback downloads ML model from network, cannot
+             ;; run in Guix build environment
              (substitute* '("examples/eval-callback/CMakeLists.txt")
                (("COMMAND llama-eval-callback")
                 "COMMAND true llama-eval-callback"))))
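Why the 'patch-paths phase is needed: vulkan-shaders-gen runs at build time to compile the backend's shaders, and it spawns its compiler commands through a shell whose path is hardcoded as "/bin/sh". Guix builds run in a container where that path does not exist, so the phase rewrites the literal to point at bash from the build inputs. A minimal, hypothetical sketch of the failure mode (not the actual vulkan-shaders-gen code):

// Hypothetical illustration: spawning a command through a hardcoded
// "/bin/sh", as the substitution above targets.
#include <cstdio>
#include <cstdlib>

int main() {
    // std::system() runs its argument via "/bin/sh -c ..."; inside the
    // Guix build container that path does not exist, so a build-time
    // shader-compilation step like this would fail before the fix.
    int status = std::system("echo compiling shaders...");
    if (status != 0)
        std::fprintf(stderr, "shell invocation failed (no /bin/sh?)\n");
    return status == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}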
@@ -658,8 +666,8 @@ Performance is achieved by using the LLVM JIT compiler.")
                          (string-append (assoc-ref outputs "out")
                                         "/bin")
                          "^test-")))))))
-    (inputs (list python))
-    (native-inputs (list pkg-config))
+    (inputs (list python vulkan-headers vulkan-loader))
+    (native-inputs (list pkg-config shaderc bash))
     (propagated-inputs
      (list python-numpy python-pytorch python-sentencepiece openblas))
     (properties '((tunable? . #true))) ;use AVX512, FMA, etc. when available
gnu/packages/patches/llama-cpp-vulkan-optional.patch (new file, 24 lines)

@@ -0,0 +1,24 @@
+Author: Danny Milosavljevic <dannym@friendly-machines.com>
+Date: 2025-01-29
+License: Expat
+
+diff -ru orig/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+--- orig/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp	2025-01-29 06:24:10.922476480 +0100
++++ llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp	2025-01-29 06:26:45.973357439 +0100
+@@ -7317,9 +7317,13 @@
+ }
+
+ static int ggml_vk_get_device_count() {
+-    ggml_vk_instance_init();
+-
+-    return vk_instance.device_indices.size();
++    try {
++        ggml_vk_instance_init();
++        return vk_instance.device_indices.size();
++    } catch (const vk::SystemError& e) {
++        std::cerr << "ggml_vulkan: Error: System error" << std::endl;
++        return 0;
++    }
+ }
+
+ static void ggml_vk_get_device_description(int device, char * description, size_t description_size) {