From 3dcd1d58ab06daef4df0af5a95e1b5b8a645aaab Mon Sep 17 00:00:00 2001
From: Eric Curtin <ecurtin@redhat.com>
Date: Fri, 3 Jan 2025 13:40:09 +0000
Subject: [PATCH] Update README and model.py for Nvidia GPU support

README.md: Updated the Nvidia GPU (cuda) status to :white_check_mark:
indicating support.

model.py: Added logic to handle CUDA_VISIBLE_DEVICES, returning the
corresponding quay.io/ramalama/cuda:latest image.

Signed-off-by: Eric Curtin <ecurtin@redhat.com>
---
 README.md         | 2 +-
 ramalama/model.py | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index a30075a2..193c834c 100644
--- a/README.md
+++ b/README.md
@@ -87,7 +87,7 @@ curl -fsSL https://raw.githubusercontent.com/containers/ramalama/s/install.sh |
 | Apple Silicon GPU (Linux / Asahi) | :white_check_mark: |
 | Apple Silicon GPU (macOS) | :white_check_mark: |
 | Apple Silicon GPU (podman-machine) | :white_check_mark: |
-| Nvidia GPU (cuda) | :x: [Containerfile](https://github.com/containers/ramalama/blob/main/container-images/cuda/Containerfile) available but not published to quay.io |
+| Nvidia GPU (cuda) | :white_check_mark: |
 | AMD GPU (rocm) | :white_check_mark: |

 ## COMMANDS
diff --git a/ramalama/model.py b/ramalama/model.py
index 086fde27..07bd3b29 100644
--- a/ramalama/model.py
+++ b/ramalama/model.py
@@ -101,8 +101,9 @@ def _image(self, args):
         gpu_type, _ = get_gpu()
         if gpu_type == "HIP_VISIBLE_DEVICES":
             return "quay.io/ramalama/rocm:latest"
-
-        if gpu_type == "ASAHI_VISIBLE_DEVICES":
+        elif gpu_type == "CUDA_VISIBLE_DEVICES":
+            return "quay.io/ramalama/cuda:latest"
+        elif gpu_type == "ASAHI_VISIBLE_DEVICES":
             return "quay.io/ramalama/asahi:latest"

         return args.image