Skip to content

Commit 4ce161b

Browse files
committed
Add backend/index.yaml entries for llama-cpp on rocm7
1 parent 0b6230a commit 4ce161b

File tree

1 file changed

+12
-0
lines changed

1 file changed

+12
-0
lines changed

backend/index.yaml

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
nvidia: "cuda12-llama-cpp"
2323
intel: "intel-sycl-f16-llama-cpp"
2424
amd-rocm-6: "rocm6-llama-cpp"
25+
amd-rocm-7: "rocm7-llama-cpp"
2526
metal: "metal-llama-cpp"
2627
vulkan: "vulkan-llama-cpp"
2728
nvidia-l4t: "nvidia-l4t-arm64-llama-cpp"
@@ -516,6 +517,7 @@
516517
nvidia: "cuda12-llama-cpp-development"
517518
intel: "intel-sycl-f16-llama-cpp-development"
518519
amd-rocm-6: "rocm6-llama-cpp-development"
520+
amd-rocm-7: "rocm7-llama-cpp-development"
519521
metal: "metal-llama-cpp-development"
520522
vulkan: "vulkan-llama-cpp-development"
521523
nvidia-l4t: "nvidia-l4t-arm64-llama-cpp-development"
@@ -649,6 +651,11 @@
649651
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-amd-rocm-6-llama-cpp"
650652
mirrors:
651653
- localai/localai-backends:latest-gpu-amd-rocm-6-llama-cpp
654+
- !!merge <<: *llamacpp
655+
name: "rocm7-llama-cpp"
656+
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-amd-rocm-7-llama-cpp"
657+
mirrors:
658+
- localai/localai-backends:latest-gpu-amd-rocm-7-llama-cpp
652659
- !!merge <<: *llamacpp
653660
name: "intel-sycl-f32-llama-cpp"
654661
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-llama-cpp"
@@ -694,6 +701,11 @@
694701
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-amd-rocm-6-llama-cpp"
695702
mirrors:
696703
- localai/localai-backends:master-gpu-amd-rocm-6-llama-cpp
704+
- !!merge <<: *llamacpp
705+
name: "rocm7-llama-cpp-development"
706+
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-amd-rocm-7-llama-cpp"
707+
mirrors:
708+
- localai/localai-backends:master-gpu-amd-rocm-7-llama-cpp
697709
- !!merge <<: *llamacpp
698710
name: "intel-sycl-f32-llama-cpp-development"
699711
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-llama-cpp"

0 commit comments

Comments (0)