mirror of
https://github.com/yann64/haikuports.git
synced 2026-04-09 05:10:05 +02:00
llama-cpp: add recipe
This commit is contained in:
149
dev-cpp/llama-cpp/llama_cpp-b4644.recipe
Normal file
149
dev-cpp/llama-cpp/llama_cpp-b4644.recipe
Normal file
@@ -0,0 +1,149 @@
|
||||
SUMMARY="Inference of Meta's LLaMA model (and others) in pure C/C++"
DESCRIPTION="The main goal of llama.cpp is to enable LLM inference with minimal setup and state-of-the-art \
performance on a wide variety of hardware - locally and in the cloud.

* Plain C/C++ implementation without any dependencies
* AVX and AVX2 support for x86 architectures
* 1.5-bit, 2-bit, 3-bit, 4-bit, 5-bit, 6-bit, and 8-bit integer quantization for faster inference and reduced memory use

Since its inception, the project has improved significantly thanks to many contributions. It is the main playground \
for developing new features for the ggml library."
HOMEPAGE="https://github.com/ggerganov/llama.cpp/"
COPYRIGHT="Georgi Gerganov"
LICENSE="MIT"
REVISION="1"
# $portVersion is the upstream release tag (e.g. b4644); haikuporter injects it.
SOURCE_URI="https://github.com/ggerganov/llama.cpp/archive/refs/tags/$portVersion.tar.gz"
CHECKSUM_SHA256="a1b82ab73aa7aa287089ea1752066e60f98adc32859cde40beab190ba0bd97de"
SOURCE_DIR="llama.cpp-$portVersion"
# Second source: mimalloc, built as a static object and linked into the
# executables in BUILD() (see -DCMAKE_EXE_LINKER_FLAGS there).
SOURCE_URI_2="https://github.com/microsoft/mimalloc/archive/refs/tags/v2.1.7.tar.gz"
CHECKSUM_SHA256_2="0eed39319f139afde8515010ff59baf24de9e47ea316a315398e8027d198202d"
SOURCE_DIR_2="mimalloc-2.1.7"

PATCHES="llama_cpp-$portVersion.patchset"

# x86_gcc2 excluded: the legacy gcc2 toolchain cannot build this C++17 codebase.
ARCHITECTURES="all !x86_gcc2"

PROVIDES="
	llama_cpp = $portVersion
	lib:libggml = $portVersion
	lib:libggml_base = $portVersion
	lib:libggml_cpu = $portVersion
	lib:libllama = $portVersion
	lib:libllava_shared = $portVersion
	cmd:convert_hf_to_gguf.py
	cmd:llama_baby_llama
	cmd:llama_batched
	cmd:llama_batched_bench
	cmd:llama_bench
	cmd:llama_cli
	cmd:llama_convert_llama2c_to_ggml
	cmd:llama_cvector_generator
	cmd:llama_embedding
	cmd:llama_eval_callback
	cmd:llama_export_lora
	cmd:llama_gbnf_validator
	cmd:llama_gguf
	cmd:llama_gguf_hash
	cmd:llama_gguf_split
	cmd:llama_gen_docs
	cmd:llama_gritlm
	cmd:llama_imatrix
	cmd:llama_infill
	cmd:llama_llava_cli
	cmd:llama_llava_clip_quantize_cli
	cmd:llama_lookahead
	cmd:llama_lookup
	cmd:llama_lookup_create
	cmd:llama_lookup_merge
	cmd:llama_lookup_stats
	cmd:llama_minicpmv_cli
	cmd:llama_parallel
	cmd:llama_passkey
	cmd:llama_perplexity
	cmd:llama_quantize
	cmd:llama_quantize_stats
	cmd:llama_qwen2vl_cli
	cmd:llama_retrieval
	cmd:llama_run
	cmd:llama_save_load_state
	cmd:llama_server
	cmd:llama_simple
	cmd:llama_simple_chat
	cmd:llama_speculative
	cmd:llama_speculative_simple
	cmd:llama_tokenize
	cmd:llama_tts
	"
REQUIRES="
	haiku
	lib:libgomp
	"

PROVIDES_devel="
	llama_cpp_devel = $portVersion
	devel:libggml = $portVersion
	devel:libggml_base = $portVersion
	devel:libggml_cpu = $portVersion
	devel:libllama = $portVersion
	devel:libllava_shared = $portVersion
	"
REQUIRES_devel="
	llama_cpp == $portVersion base
	"

BUILD_REQUIRES="
	haiku_devel
	devel:libgomp
	"
BUILD_PREREQUIRES="
	cmd:cmake
	cmd:make
	cmd:gcc
	cmd:git
	"
|
||||
|
||||
# Build in two phases:
#   1. Compile mimalloc (second source dir) as a single linkable object
#      (MI_BUILD_OBJECT=ON, shared/static/test artifacts disabled).
#   2. Configure and build llama.cpp, injecting that object via
#      CMAKE_EXE_LINKER_FLAGS so every executable uses mimalloc as its
#      allocator. $jobArgs, $sourceDir/$sourceDir2 and $cmakeDirArgs are
#      provided by haikuporter.
BUILD()
{
	cmake -Bbuild_mimalloc -S$sourceDir2 -DCMAKE_BUILD_TYPE=Release \
		-DMI_BUILD_SHARED=OFF \
		-DMI_BUILD_STATIC=OFF \
		-DMI_BUILD_TESTS=OFF \
		-DMI_BUILD_OBJECT=ON

	make -C build_mimalloc $jobArgs

	# GGML_NATIVE/LLAMA_NATIVE are OFF so the package is not tied to the
	# build machine's CPU features (ARCHITECTURES="all").
	cmake -Bbuild -S. -DCMAKE_BUILD_TYPE=Release \
		$cmakeDirArgs \
		-DBUILD_SHARED_LIBS_DEFAULT=ON \
		-DLLAMA_BUILD_SERVER=ON \
		-DLLAMA_BUILD_TESTS=OFF \
		-DLLAMA_BUILD_EXAMPLES=ON \
		-DLLAMA_NATIVE=OFF \
		-DGGML_NATIVE=OFF \
		-DCMAKE_EXE_LINKER_FLAGS="$sourceDir/build_mimalloc/mimalloc.o"

	make -C build $jobArgs
}
|
||||
|
||||
# Install the built tree, then split development files (headers, import
# libraries, cmake/pkgconfig data) into the _devel subpackage.
INSTALL()
{
	make -C build install

	# Move the installed shared libraries' devel symlinks/import stubs
	# into $developDir for the _devel package.
	prepareInstalledDevelLibs \
		libggml \
		libggml-base \
		libggml-cpu \
		libllama \
		libllava_shared

	# Rewrite installed .pc files so their paths match the package layout.
	fixPkgconfig

	# Everything under $developDir and the cmake config dir belongs to
	# the llama_cpp_devel package (PROVIDES_devel above).
	packageEntries devel \
		$developDir \
		$libDir/cmake
}
|
||||
|
||||
# Run the upstream test suite via CTest's "test" target.
# NOTE(review): LLAMA_BUILD_TESTS=OFF in BUILD() means few or no tests are
# compiled — confirm whether this target is effectively a no-op.
TEST()
{
	make -C build test
}
|
||||
62
dev-cpp/llama-cpp/patches/llama_cpp-b4644.patchset
Normal file
62
dev-cpp/llama-cpp/patches/llama_cpp-b4644.patchset
Normal file
@@ -0,0 +1,62 @@
|
||||
From ddf1e86dbcf6a756b215cc219e3b06fee8f59ce9 Mon Sep 17 00:00:00 2001
|
||||
From: Gerasim Troeglazov <3dEyes@gmail.com>
|
||||
Date: Wed, 5 Feb 2025 21:11:33 +1000
|
||||
Subject: Fix build for Haiku
|
||||
|
||||
|
||||
diff --git a/common/common.cpp b/common/common.cpp
|
||||
index 8661e16..fb9c3c1 100644
|
||||
--- a/common/common.cpp
|
||||
+++ b/common/common.cpp
|
||||
@@ -67,6 +67,8 @@
|
||||
#if defined(LLAMA_USE_CURL)
|
||||
#ifdef __linux__
|
||||
#include <linux/limits.h>
|
||||
+#elif defined(__HAIKU__)
|
||||
+#include <limits.h>
|
||||
#elif defined(_WIN32)
|
||||
# if !defined(PATH_MAX)
|
||||
# define PATH_MAX MAX_PATH
|
||||
diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt
|
||||
index 1b7cc8c..cdaded1 100644
|
||||
--- a/examples/server/CMakeLists.txt
|
||||
+++ b/examples/server/CMakeLists.txt
|
||||
@@ -47,4 +47,8 @@ if (WIN32)
|
||||
TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
|
||||
endif()
|
||||
|
||||
+if (HAIKU)
|
||||
+ TARGET_LINK_LIBRARIES(${TARGET} PRIVATE network)
|
||||
+endif()
|
||||
+
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_17)
|
||||
diff --git a/ggml/src/ggml-rpc/CMakeLists.txt b/ggml/src/ggml-rpc/CMakeLists.txt
|
||||
index f5acb8e..3bb9073 100644
|
||||
--- a/ggml/src/ggml-rpc/CMakeLists.txt
|
||||
+++ b/ggml/src/ggml-rpc/CMakeLists.txt
|
||||
@@ -7,3 +7,7 @@ ggml_add_backend_library(ggml-rpc
|
||||
if (WIN32)
|
||||
target_link_libraries(ggml-rpc PRIVATE ws2_32)
|
||||
endif()
|
||||
+
|
||||
+if (HAIKU)
|
||||
+ target_link_libraries(ggml-rpc PRIVATE network)
|
||||
+endif()
|
||||
diff --git a/src/llama-mmap.cpp b/src/llama-mmap.cpp
|
||||
index b716630..a1b9c7b 100644
|
||||
--- a/src/llama-mmap.cpp
|
||||
+++ b/src/llama-mmap.cpp
|
||||
@@ -22,6 +22,10 @@
|
||||
#endif
|
||||
#endif
|
||||
|
||||
+#ifdef __HAIKU__
|
||||
+#undef _POSIX_MEMLOCK_RANGE
|
||||
+#endif
|
||||
+
|
||||
#if defined(_WIN32)
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#ifndef NOMINMAX
|
||||
--
|
||||
2.45.2
|
||||
|
||||
Reference in New Issue
Block a user