llama-cpp: bump version, enable RPC-server

Commit author: Gerasim Troeglazov
Commit date: 2025-02-08 11:25:47 +10:00
parent 799fca4e0a
commit 2c34dbac4f
2 changed files with 7 additions and 2 deletions

View File

@@ -13,7 +13,7 @@ COPYRIGHT="Georgi Gerganov"
LICENSE="MIT"
REVISION="1"
SOURCE_URI="https://github.com/ggerganov/llama.cpp/archive/refs/tags/$portVersion.tar.gz"
-CHECKSUM_SHA256="a1b82ab73aa7aa287089ea1752066e60f98adc32859cde40beab190ba0bd97de"
+CHECKSUM_SHA256="c630e767ce858e0ea16980bc38a1fa008c25a320610d07aa136b147446a1b3ef"
SOURCE_DIR="llama.cpp-$portVersion"
SOURCE_URI_2="https://github.com/microsoft/mimalloc/archive/refs/tags/v2.1.7.tar.gz"
CHECKSUM_SHA256_2="0eed39319f139afde8515010ff59baf24de9e47ea316a315398e8027d198202d"
@@ -28,6 +28,7 @@ PROVIDES="
lib:libggml = $portVersion
lib:libggml_base = $portVersion
lib:libggml_cpu = $portVersion
lib:libggml_rpc = $portVersion
lib:libllama = $portVersion
lib:libllava_shared = $portVersion
cmd:convert_hf_to_gguf.py
@@ -73,6 +74,7 @@ PROVIDES="
cmd:llama_speculative_simple
cmd:llama_tokenize
cmd:llama_tts
cmd:rpc_server
"
REQUIRES="
haiku
@@ -84,6 +86,7 @@ PROVIDES_devel="
devel:libggml = $portVersion
devel:libggml_base = $portVersion
devel:libggml_cpu = $portVersion
devel:libggml_rpc = $portVersion
devel:libllama = $portVersion
devel:libllava_shared = $portVersion
"
@@ -118,8 +121,8 @@ BUILD()
-DLLAMA_BUILD_SERVER=ON \
-DLLAMA_BUILD_TESTS=OFF \
-DLLAMA_BUILD_EXAMPLES=ON \
-DLLAMA_NATIVE=OFF \
-DGGML_NATIVE=OFF \
-DGGML_RPC=ON \
-DCMAKE_EXE_LINKER_FLAGS="$sourceDir/build_mimalloc/mimalloc.o"
make -C build $jobArgs
@@ -128,11 +131,13 @@ BUILD()
INSTALL()
{
make -C build install
cp build/bin/rpc-server $binDir
prepareInstalledDevelLibs \
libggml \
libggml-base \
libggml-cpu \
libggml-rpc \
libllama \
libllava_shared