diff --git a/README.md b/README.md
index bb36ed6..b41dbfb 100644
--- a/README.md
+++ b/README.md
@@ -51,10 +51,16 @@ You can provide the following optional configuration table to the `setup` functi
 local defaults = {
     -- See plugin debugging logs
     debug = false,
+    -- New settings to allow a bit more of customization, and checking if ollama is run via docker or if it's installed in the local system
+    docker = true,
+    ollama_host = "localhost",
+    ollama_port = "11434",
     -- The model for ollama to use. This model will be automatically downloaded.
     model = llama2,
 }
+-- can also set a keymap to open ollama
+vim.keymap.set("n", "co", "<cmd>Llama<CR>")
 ```
 
 ### Model library
diff --git a/lua/nvim-llama/init.lua b/lua/nvim-llama/init.lua
index e9ad7b1..9e095fb 100644
--- a/lua/nvim-llama/init.lua
+++ b/lua/nvim-llama/init.lua
@@ -20,22 +20,33 @@ local function set_commands()
         M.interactive_llama()
     end, {})
 end
-
 local function is_docker_installed()
     local handle = io.popen("docker --version 2>&1")
     local result = handle:read("*a")
     handle:close()
-
     return result:match("Docker version")
 end
+local function is_ollama_installed()
+if settings.docker == false then
+    local handle = io.popen("ollama --version 2>&1")
+    local result = handle:read("*a")
+    handle:close()
+    return result:match("ollama version")
+    end
+end
 local function is_docker_running()
+    if settings.docker == true then
     local handle = io.popen("docker info > /dev/null 2>&1; echo $?")
     local result = handle:read("*a")
     handle:close()
     return result:match("0\n")
-end
+    end
+    if settings.docker == false then
+    return true
+    end
+    end
 
 local function check_docker()
     if not is_docker_installed() then
@@ -51,16 +62,6 @@ local function check_docker()
     return true
 end
 
-local function async(command, args, callback)
-    vim.loop.spawn(command, {args = args}, function(code)
-        if code == 0 then
-            callback(true)
-        else
-            callback(false)
-        end
-    end)
-end
-
 local function is_container_running()
     local command = string.format("docker ps --filter 'name=^/nvim-llama$' --format '{{.Names}}'")
     local handle = io.popen(command)
@@ -69,6 +70,13 @@ local function is_container_running()
     return result == "nvim-llama"
 end
 
+local function is_ollama_running()
+    local command = string.format("curl http://" .. settings.ollama_host .. ":" .. settings.ollama_port)
+    local handle = io.popen(command)
+    local result = trim(handle:read("*a"))
+    handle:close()
+    return result:match("Ollama is running")
+    end
 local function check_ollama_container()
     local container_name = "nvim-llama"
 
@@ -98,6 +106,7 @@ function M.setup(config)
         settings.set(config)
     end
 
+    if settings.docker == true then
     local status, err = pcall(check_docker)
     if not status then
         print("Error checking docker status: " .. err)
@@ -107,7 +116,17 @@
     if not status then
         print("Error checking docker status: " .. err)
     end
-
+    end
+    if settings.docker == false then
+    local status, err = pcall(is_ollama_installed)
+    if not status then
+        print("Ollama doesn't seem to be installed: " .. err)
+    end
+    status, err = pcall(is_ollama_running)
+    if not status then
+        print("Ollama doesn't seem to be running: " .. err)
+    end
+    end
     set_commands()
 end
 
diff --git a/lua/nvim-llama/ollama.lua b/lua/nvim-llama/ollama.lua
index 3bdc840..fb10ab5 100644
--- a/lua/nvim-llama/ollama.lua
+++ b/lua/nvim-llama/ollama.lua
@@ -38,26 +38,50 @@ function M.restart()
     if err then
         error("Failed to restart Ollama container: " .. err)
     end
+    return result
 end
 
-function M.start()
+function M.start(docker,ollama_host,ollama_port)
+    if docker == true then
     M.prepare()
 
-    local start_command = "docker run -d -p 11434:11434 -v " .. home .. "/.ollama:/root/.ollama --name nvim-llama ollama/ollama"
+    local start_command = "docker run -d -p " .. ollama_port .. ":11434 -v " .. home .. "/.ollama:/root/.ollama --name nvim-llama ollama/ollama"
     local handle, err = io.popen(start_command)
     local result = handle:read("*a")
     handle:close()
-
     if err then
         error("Failed to start Ollama container: " .. err)
     end
-end
+    return result
+    end
+    if docker == false then
+    local is_started = "curl " .. "http://" .. ollama_host .. ":" .. ollama_port
+    local _, err = io.popen(is_started)
+    if err ~= nil then
+        error("Failed to get to the ollama_endpoint")
+    end
+
+    end
+    end
 
-function M.run(model)
-    return "docker exec -it nvim-llama ollama run " .. model
+function M.run(model,docker)
+    if docker == true then
+    local cmd = "docker exec -it nvim-llama ollama run " .. model
+    return cmd
+    end
+    if docker == false then
+    local cmd = "ollama run " .. model
+    return cmd
+    end
+    return cmd
 end
 
-function M.list()
+function M.list(docker)
+    if docker == true then
     return "docker exec -it nvim-llama ollama list"
+    end
+    if docker == false then
+    return "ollama list"
+    end
 end
 
 return M
diff --git a/lua/nvim-llama/settings.lua b/lua/nvim-llama/settings.lua
index dec44bb..7440199 100644
--- a/lua/nvim-llama/settings.lua
+++ b/lua/nvim-llama/settings.lua
@@ -5,9 +5,12 @@ M.namespace = vim.api.nvim_create_namespace("nvim-llama")
 local defaults = {
     -- See plugin debugging logs
     debug = false,
+    docker = true,
+    ollama_host = "localhost",
+    ollama_port = "11434",
     -- the model to use with Ollama.
-    model = 'llama2',
+    model = 'llama3',
 }
 
 M.current = defaults