Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 25789ae

Browse files
author
zhouwg
committed Mar 11, 2025
ggml-qnn: rebase to upstream
1 parent 95b60b4 commit 25789ae

File tree

9 files changed

+1522
-1875
lines changed

9 files changed

+1522
-1875
lines changed
 

‎common/console.cpp

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -241,9 +241,7 @@ namespace console {
241241
(void)codepoint;
242242
return 1;
243243
#else
244-
//return wcwidth(codepoint);
245-
(void)codepoint;
246-
return 1;
244+
return wcwidth(codepoint);
247245
#endif
248246
}
249247

‎examples/export-lora/export-lora.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -148,7 +148,7 @@ struct lora_merge_ctx {
148148

149149
ctx_out = gguf_init_empty();
150150
struct ggml_init_params params = {
151-
/*.mem_size =*/ static_cast<size_t>(gguf_get_n_tensors(base_model.ctx_gguf)*ggml_tensor_overhead()),
151+
/*.mem_size =*/ gguf_get_n_tensors(base_model.ctx_gguf)*ggml_tensor_overhead(),
152152
/*.mem_buffer =*/ NULL,
153153
/*.no_alloc =*/ true,
154154
};

‎ggml/src/ggml-qnn/ggml-qnn-impl.h

Lines changed: 0 additions & 617 deletions
This file was deleted.

‎ggml/src/ggml-qnn/ggml-qnn-ops.cpp

Lines changed: 0 additions & 687 deletions
This file was deleted.

‎ggml/src/ggml-qnn/ggml-qnn-ops.h

Lines changed: 0 additions & 52 deletions
This file was deleted.

‎ggml/src/ggml-qnn/ggml-qnn.cpp

Lines changed: 1510 additions & 241 deletions
Large diffs are not rendered by default.

‎scripts/build-run-android.sh

Lines changed: 6 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,7 @@ function prepare_run_on_phone()
147147
adb shell chmod +x ${REMOTE_PATH}/${program}
148148
}
149149

150+
150151
function run_llamacli()
151152
{
152153
prepare_run_on_phone llama-cli
@@ -212,35 +213,6 @@ function run_test-op()
212213

213214
}
214215

215-
function run_ut_add()
216-
{
217-
prepare_run_on_phone ggml-qnn-ut
218-
219-
adb shell "cd ${REMOTE_PATH} \
220-
&& export LD_LIBRARY_PATH=${REMOTE_PATH} \
221-
&& ${REMOTE_PATH}/ggml-qnn-ut -t GGML_OP_ADD -b $qnnbackend"
222-
223-
}
224-
225-
function run_ut_mulmat()
226-
{
227-
prepare_run_on_phone ggml-qnn-ut
228-
229-
adb shell "cd ${REMOTE_PATH} \
230-
&& export LD_LIBRARY_PATH=${REMOTE_PATH} \
231-
&& ${REMOTE_PATH}/ggml-qnn-ut -t GGML_OP_MUL_MAT -b $qnnbackend"
232-
233-
}
234-
235-
function run_ut_mul()
236-
{
237-
prepare_run_on_phone ggml-qnn-ut
238-
239-
adb shell "cd ${REMOTE_PATH} \
240-
&& export LD_LIBRARY_PATH=${REMOTE_PATH} \
241-
&& ${REMOTE_PATH}/ggml-qnn-ut -t GGML_OP_MUL -b $qnnbackend"
242-
243-
}
244216

245217
function print_oplist()
246218
{
@@ -330,10 +302,7 @@ function show_usage()
330302
echo " $0 build"
331303
echo " $0 updateqnnlib"
332304
echo " $0 run_testops"
333-
echo " $0 run_testop [ADD/MUL/MUL_MAT] [0 (QNN_CPU) / 1 (QNN_GPU) / 2 (QNN_NPU)]"
334-
echo " $0 run_ut_add 0 (QNN_CPU) / 1 (QNN_GPU) / 2 (QNN_NPU) / 3 (ggml)"
335-
echo " $0 run_ut_mulmat 0 (QNN_CPU) / 1 (QNN_GPU) / 2 (QNN_NPU) / 3 (ggml)"
336-
echo " $0 run_ut_mul 0 (QNN_CPU) / 1 (QNN_GPU) / 2 (QNN_NPU) / 3 (ggml)"
305+
echo " $0 run_testop [ADD/MUL/MUL_MAT/...(op from print_oplist)] [0 (QNN_CPU) / 1 (QNN_GPU) / 2 (QNN_NPU)]"
337306
echo " $0 run_llamacli 0 (QNN_CPU) / 1 (QNN_GPU) / 2 (QNN_NPU) / 3 (ggml)"
338307
echo " $0 run_llamabench 0 (QNN_CPU) / 1 (QNN_GPU) / 2 (QNN_NPU) / 3 (ggml)"
339308

@@ -374,31 +343,20 @@ elif [ $# == 1 ]; then
374343
fi
375344
elif [ $# == 2 ]; then
376345
qnnbackend=$2
377-
if [ ${qnnbackend} -gt 3 ]; then
378-
show_usage
379-
exit 1
380-
fi
381-
382346
if [ "$1" == "run_llamacli" ]; then
383347
run_llamacli
384348
exit 0
385349
elif [ "$1" == "run_llamabench" ]; then
386350
run_llamabench
387351
exit 0
388-
elif [ "$1" == "run_ut_add" ]; then
389-
run_ut_add
390-
exit 0
391-
elif [ "$1" == "run_ut_mulmat" ]; then
392-
run_ut_mulmat
393-
exit 0
394-
elif [ "$1" == "run_ut_mul" ]; then
395-
run_ut_mul
396352
exit 0
353+
else
354+
show_usage
355+
exit 1
397356
fi
398357
elif [ $# == 3 ]; then
358+
#opname can be found via print_oplist:
399359
opname=$2
400-
#TODO: check opname in oplist
401-
#opname can be found via print_oplist:
402360

403361
qnnbackend=$3
404362
if [ ${qnnbackend} -gt 3 ]; then

‎scripts/build-run-windows.sh

Lines changed: 0 additions & 222 deletions
This file was deleted.

‎src/llama-mmap.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -481,10 +481,10 @@ struct llama_mlock::impl {
481481
// Skip resource limit checks on visionOS/tvOS
482482
suggest = false;
483483
#else
484-
struct rlimit lock_limit = {};
485-
//if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
486-
// suggest = false;
487-
//}
484+
struct rlimit lock_limit;
485+
if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
486+
suggest = false;
487+
}
488488
if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
489489
suggest = false;
490490
}

0 commit comments

Comments (0)
Please sign in to comment.