From 5a1916e4785973ea43a513f2cf127921af30a4dc Mon Sep 17 00:00:00 2001
From: "Schmidt, Tibor"
Date: Wed, 31 Jan 2024 23:46:55 +0100
Subject: [PATCH] chore: wip backup + lmstudio test

---
 lua/gp/config.lua | 57 +++++++++++++++++++++++++++++++++++++++++++++--
 lua/gp/init.lua   | 53 +++++++++++++++++++++++++++++++------------
 2 files changed, 94 insertions(+), 16 deletions(-)

diff --git a/lua/gp/config.lua b/lua/gp/config.lua
index 0966f2a..0c5ba18 100644
--- a/lua/gp/config.lua
+++ b/lua/gp/config.lua
@@ -43,8 +43,11 @@ local config = {
 			},
 		},
 		ollama = {
-			-- endpoint = "http://localhost:8000/v1/chat/completions",
+			endpoint = "http://localhost:11434/api/chat",
 		},
+		lmstudio = {
+			endpoint = "http://localhost:1234/v1/chat/completions",
+		},
 	},
 
 	-- prefix for all commands
@@ -115,6 +118,38 @@ local config = {
 				.. "- Don't elide any code from your output if the answer requires coding.\n"
 				.. "- Take a deep breath; You've got this!\n",
 		},
+		{
+			provider = "ollama",
+			name = "ChatOllama",
+			chat = true,
+			command = false,
+			-- string with model name or table with model name and parameters
+			model = {
+				model = "mistral:7b-instruct-v0.2-q4_K_M",
+				temperature = 1.97,
+				top_p = 1,
+				num_ctx = 8192,
+				min_p = 0.05,
+			},
+			-- system prompt (use this to specify the persona/role of the AI)
+			system_prompt = "You are a general AI assistant.",
+		},
+		{
+			provider = "lmstudio",
+			name = "ChatLMStudio",
+			chat = true,
+			command = false,
+			-- string with model name or table with model name and parameters
+			model = {
+				model = "dummy",
+				temperature = 0.97,
+				top_p = 1,
+				num_ctx = 8192,
+				min_p = 0.05,
+			},
+			-- system prompt (use this to specify the persona/role of the AI)
+			system_prompt = "You are a general AI assistant.",
+		},
 		{
 			provider = "openai",
 			name = "CodeGPT4",
@@ -151,6 +186,25 @@ local config = {
 				.. "Please AVOID COMMENTARY OUTSIDE OF THE SNIPPET RESPONSE.\n"
 				.. "START AND END YOUR ANSWER WITH:\n\n```",
 		},
+		{
+			provider = "ollama",
+			name = "CodeOllamaDeepSeek",
+			chat = false,
+			command = true,
+			-- string with model name or table with model name and parameters
+			model = {
+				model = "mistral:7b-instruct-v0.2-q4_K_M",
+				temperature = 1.9,
+				top_p = 1,
+				num_ctx = 8192,
+				min_p = 0.05,
+			},
+			-- system prompt (use this to specify the persona/role of the AI)
+			system_prompt = "You are an AI working as a code editor providing answers.\n\n"
+				.. "Use 4 SPACES FOR INDENTATION.\n"
+				.. "Please AVOID COMMENTARY OUTSIDE OF THE SNIPPET RESPONSE.\n"
+				.. "START AND END YOUR ANSWER WITH:\n\n```",
+		},
 	},
 
 	-- directory for storing chat files
@@ -166,7 +220,6 @@ local config = {
 	chat_topic_gen_prompt = "Summarize the topic of our conversation above"
 		.. " in two or three words. Respond only with those words.",
 	-- chat topic model (string with model name or table with model name and parameters)
-	chat_topic_gen_model = "gpt-3.5-turbo-16k",
 	-- explicitly confirm deletion of a chat file
 	chat_confirm_delete = true,
 	-- conceal model parameters in chat
diff --git a/lua/gp/init.lua b/lua/gp/init.lua
index 1f51298..45b7897 100644
--- a/lua/gp/init.lua
+++ b/lua/gp/init.lua
@@ -1142,10 +1142,10 @@ end
 ---@param messages table
 ---@param model string | table | nil
 ---@param default_model string | table
-M.prepare_payload = function(messages, model, default_model)
+---@param provider string | nil
+M.prepare_payload = function(messages, model, default_model, provider)
 	model = model or default_model
 
-	-- if model is a string
 	if type(model) == "string" then
 		return {
 			model = model,
@@ -1154,7 +1154,23 @@ M.prepare_payload = function(messages, model, default_model)
 		}
 	end
 
-	-- if model is a table
+	if provider == "ollama" then
+		local options = {}
+		for k, v in pairs(model) do
+			if k ~= "provider" and k ~= "model" then
+				options[k] = v
+			end
+		end
+		options.temperature = math.max(0, math.min(2, options.temperature or 1))
+		options.top_p = math.max(0, math.min(1, options.top_p or 1))
+		return {
+			model = model.model,
+			stream = true,
+			messages = messages,
+			options = options,
+		}
+	end
+
 	return {
 		model = model.model,
 		stream = true,
@@ -1198,7 +1214,7 @@ end
 -- gpt query
 ---@param buf number | nil # buffer number
 ---@param provider string # provider name
----@param payload table # payload for openai api
+---@param payload table # payload for api
 ---@param handler function # response handler
 ---@param on_exit function | nil # optional on_exit handler
 M.query = function(buf, provider, payload, handler, on_exit)
@@ -1248,16 +1264,25 @@ M.query = function(buf, provider, payload, handler, on_exit)
 					qt.raw_response = qt.raw_response .. line .. "\n"
 				end
 				line = line:gsub("^data: ", "")
+				local content = ""
 				if line:match("choices") and line:match("delta") and line:match("content") then
 					line = vim.json.decode(line)
 					if line.choices[1] and line.choices[1].delta and line.choices[1].delta.content then
-						local content = line.choices[1].delta.content
-						if content and type(content) == "string" then
-							qt.response = qt.response .. content
-							handler(qid, content)
-						end
+						content = line.choices[1].delta.content
+					end
+				end
+
+				if provider == "ollama" and line:match("message") and line:match("content") then
+					line = vim.json.decode(line)
+					if line.message and line.message.content then
+						content = line.message.content
 					end
 				end
+
+				if content and type(content) == "string" then
+					qt.response = qt.response .. content
+					handler(qid, content)
+				end
 			end
 		end
 
@@ -1269,7 +1294,7 @@ M.query = function(buf, provider, payload, handler, on_exit)
 		end
 
 		if err then
-			M.error("OpenAI query stdout error: " .. vim.inspect(err))
+			M.error(qt.provider .. " query stdout error: " .. vim.inspect(err))
 		elseif chunk then
 			-- add the incoming chunk to the buffer
 			buffer = buffer .. chunk
@@ -1289,7 +1314,7 @@ M.query = function(buf, provider, payload, handler, on_exit)
 		end
 
 		if qt.response == "" then
-			M.error("OpenAI query response is empty: \n" .. vim.inspect(qt.raw_response))
+			M.error(qt.provider .. " response is empty: \n" .. vim.inspect(qt.raw_response))
 		end
 
 		-- optional on_exit handler
@@ -2135,7 +2160,7 @@ M.chat_respond = function(params)
 	M.query(
 		buf,
 		agent.provider,
-		M.prepare_payload(messages, headers.model, agent.model),
+		M.prepare_payload(messages, headers.model, agent.model, agent.provider),
 		M.create_handler(buf, win, M._H.last_content_line(buf), true, "", not M.config.chat_free_cursor),
 		vim.schedule_wrap(function(qid)
 			local qt = M.get_query(qid)
@@ -2178,7 +2203,7 @@ M.chat_respond = function(params)
 	M.query(
 		nil,
 		agent.provider,
-		M.prepare_payload(messages, nil, M.config.chat_topic_gen_model),
+		M.prepare_payload(messages, nil, agent.model, agent.provider),
 		topic_handler,
 		vim.schedule_wrap(function()
 			-- get topic from invisible buffer
@@ -2947,7 +2972,7 @@ M.Prompt = function(params, target, prompt, model, template, system_template, wh
 	M.query(
 		buf,
 		provider,
-		M.prepare_payload(messages, model, agent.model),
+		M.prepare_payload(messages, model, agent.model, agent.provider),
 		handler,
 		vim.schedule_wrap(function(qid)
 			on_exit(qid)
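
A minimal usage sketch for the changes above, assuming the plugin is configured through the usual require("gp").setup() call with the default command prefix, that the endpoint tables added in config.lua sit under a providers key, and that local Ollama / LM Studio servers are listening on the endpoints used in this patch; the override below simply repeats the new defaults and is illustrative only:

	require("gp").setup({
		providers = {
			-- Ollama's native chat endpoint; init.lua now parses its streamed message.content chunks
			ollama = { endpoint = "http://localhost:11434/api/chat" },
			-- LM Studio exposes an OpenAI-compatible chat completions endpoint
			lmstudio = { endpoint = "http://localhost:1234/v1/chat/completions" },
		},
	})

	-- then pick one of the new agents and start a chat (command names assume the default "Gp" prefix):
	-- :GpAgent ChatOllama
	-- :GpChatNew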