@@ -1,7 +1,7 @@
 using BinaryBuilder, Pkg
 
 name = "llama_cpp"
-version = v"0.0.3" # fake version number
+version = v"0.0.4" # fake version number
 
 # url = "https://github.com/ggerganov/llama.cpp"
 # description = "Port of Facebook's LLaMA model in C/C++"
@@ -16,19 +16,22 @@ version = v"0.0.3" # fake version number
 # 0.0.1 20.03.2023 master-074bea2 https://github.com/ggerganov/llama.cpp/releases/tag/master-074bea2
 # 0.0.2 21.03.2023 master-8cf9f34 https://github.com/ggerganov/llama.cpp/releases/tag/master-8cf9f34
 # 0.0.3 22.03.2023 master-d5850c5 https://github.com/ggerganov/llama.cpp/releases/tag/master-d5850c5
+# 0.0.4 25.03.2023 master-1972616 https://github.com/ggerganov/llama.cpp/releases/tag/master-1972616
 
 sources = [
-    # 2023.03.22, https://github.com/ggerganov/llama.cpp/releases/tag/master-d5850c5
-    # fake version = 0.0.3
+    # fake version = 0.0.4
     GitSource("https://github.com/ggerganov/llama.cpp.git",
-              "d5850c53ca179b9674b98f35d359763416a3cc11"),
+              "19726169b379bebc96189673a19b89ab1d307659"),
     DirectorySource("./bundled"),
 ]
 
 script = raw"""
 cd $WORKSPACE/srcdir/llama.cpp*
 
 atomic_patch -p1 ../patches/cmake-remove-mcpu-native.patch
+if [[ "${target}" == *-w64-mingw32* ]]; then
+    atomic_patch -p1 ../patches/windows-examples-fix-missing-ggml-link.patch
+fi
 
 EXTRA_CMAKE_ARGS=
 if [[ "${target}" == *-linux-* ]]; then
@@ -40,25 +43,49 @@ cmake .. \
     -DCMAKE_INSTALL_PREFIX=$prefix \
     -DCMAKE_TOOLCHAIN_FILE=${CMAKE_TARGET_TOOLCHAIN} \
     -DCMAKE_BUILD_TYPE=RELEASE \
+    -DBUILD_SHARED_LIBS=ON \
+    -DLLAMA_BUILD_TESTS=OFF \
+    -DLLAMA_BUILD_EXAMPLES=ON \
+    -DLLAMA_OPENBLAS=OFF \
     -DLLAMA_NATIVE=OFF \
     $EXTRA_CMAKE_ARGS
 make -j${nproc}
 
 # `make install` doesn't work (2023.03.21)
 # make install
-for prg in main quantize; do
+
+# executables
+for prg in embedding main perplexity quantize; do
     install -Dvm 755 "./bin/${prg}${exeext}" "${bindir}/${prg}${exeext}"
 done
 
+# libs
+for lib in libllama; do
+    if [[ "${target}" == *-w64-mingw32* ]]; then
+        install -Dvm 755 "./bin/${lib}.${dlext}" "${libdir}/${lib}.${dlext}"
+    else
+        install -Dvm 755 "./${lib}.${dlext}" "${libdir}/${lib}.${dlext}"
+    fi
+done
+
+
+# header files
+for hdr in llama.h ggml.h; do
+    install -Dvm 644 "../${hdr}" "${includedir}/${hdr}"
+done
+
 install_license ../LICENSE
 """
 
 platforms = supported_platforms(; exclude = p -> arch(p) ∉ ["i686", "x86_64", "aarch64"])
 platforms = expand_cxxstring_abis(platforms)
 
 products = [
+    ExecutableProduct("embedding", :embedding),
     ExecutableProduct("main", :main),
+    ExecutableProduct("perplexity", :perplexity),
     ExecutableProduct("quantize", :quantize),
+    LibraryProduct("libllama", :libllama),
 ]
 
 dependencies = Dependency[
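
Once a recipe like this is merged, BinaryBuilder publishes the build as an autogenerated JLL package; by the usual "<name>_jll" convention that would be llama_cpp_jll. A minimal consumption sketch under that naming assumption, showing how the products declared above surface in Julia (the wrapper names are generated from the product symbols :embedding, :main, :perplexity, :quantize, and :libllama):

    # Hypothetical usage; assumes this recipe was built and published as
    # the autogenerated package llama_cpp_jll.
    using llama_cpp_jll

    # Each ExecutableProduct becomes a function returning a Cmd with the
    # platform's PATH and library paths preconfigured.
    run(`$(llama_cpp_jll.main()) --help`)

    # The LibraryProduct becomes a string constant holding the
    # shared-library path, suitable for ccall/@ccall.
    println(llama_cpp_jll.libllama)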