From a4129d9a5072da09f4502c3919f66176ec18b3ea Mon Sep 17 00:00:00 2001
From: YASH <139299779+Yash-2707@users.noreply.github.com>
Date: Sat, 5 Oct 2024 13:39:46 +0530
Subject: [PATCH] Update README.md

Requirements: A new section to list the necessary dependencies.
Installation: A new section with a clear title and a brief description.
Minimal Configuration: A new subsection to provide a simple example of how to install the plugin.
Custom Parameters (with defaults): A new subsection to provide a detailed example of how to configure the plugin.
Alternative Setup: A new subsection to provide an alternative way to set up the plugin.
Usage: A new section with a clear title and a brief description.
Example Key Maps: A new subsection to provide examples of how to use the plugin.
Custom Prompts: A new section to explain how to create custom prompts.
Prompt Properties: A new subsection to explain the properties of a prompt.
Tip: A new section to provide a helpful tip.
---
 README.md | 95 ++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 53 insertions(+), 42 deletions(-)

diff --git a/README.md b/README.md
index 0e77dec..176fa9c 100644
--- a/README.md
+++ b/README.md
@@ -8,59 +8,55 @@ Generate text using LLMs with customizable prompts
 
 [![Local LLMs in Neovim: gen.nvim](https://user-images.githubusercontent.com/1009936/273126287-7b5f2b40-c678-47c5-8f21-edf9516f6034.jpg)](https://youtu.be/FIZt7MinpMY?si=KChSuJJDyrcTdYiM)
 
+## Requirements
 
-## Requires
-
-- [Ollama](https://ollama.ai/) with an appropriate model, e.g. [`llama3.1`](https://ollama.com/library/llama3.1), [`mistral`](https://ollama.ai/library/mistral), etc.
+- [Ollama](https://ollama.ai/) with a suitable model, e.g. [`llama3.1`](https://ollama.com/library/llama3.1), [`mistral`](https://ollama.ai/library/mistral), etc.
 - [Curl](https://curl.se/)
 
-## Install
+## Installation
 
 Install with your favorite plugin manager, e.g. [lazy.nvim](https://github.com/folke/lazy.nvim)
 
-Example with Lazy
+### Minimal Configuration
 
 ```lua
--- Minimal configuration
-{ "David-Kunz/gen.nvim" },
-
+{ "David-Kunz/gen.nvim" }
 ```
 
-```lua
+### Custom Parameters (with defaults)
 
--- Custom Parameters (with defaults)
+```lua
 {
     "David-Kunz/gen.nvim",
     opts = {
         model = "mistral", -- The default model to use.
-        quit_map = "q", -- set keymap for close the response window
-        retry_map = "<c-r>", -- set keymap to re-send the current prompt
-        accept_map = "<c-cr>", -- set keymap to replace the previous selection with the last result
+        quit_map = "q", -- Keymap to close the response window.
+        retry_map = "<c-r>", -- Keymap to re-send the current prompt.
+        accept_map = "<c-cr>", -- Keymap to replace the previous selection with the last result.
         host = "localhost", -- The host running the Ollama service.
         port = "11434", -- The port on which the Ollama service is listening.
-        display_mode = "float", -- The display mode. Can be "float" or "split" or "horizontal-split".
+        display_mode = "float", -- The display mode. Can be "float", "split", or "horizontal-split".
        show_prompt = false, -- Shows the prompt submitted to Ollama.
-        show_model = false, -- Displays which model you are using at the beginning of your chat session.
-        no_auto_close = false, -- Never closes the window automatically.
-        file = false, -- Write the payload to a temporary file to keep the command short.
-        hidden = false, -- Hide the generation window (if true, will implicitly set `prompt.replace = true`), requires Neovim >= 0.10
-        init = function(options) pcall(io.popen, "ollama serve > /dev/null 2>&1 &") end,
-        -- Function to initialize Ollama
+        show_model = false, -- Displays the model in use at the beginning of the chat session.
+        no_auto_close = false, -- Prevents the window from closing automatically.
+        file = false, -- Writes the payload to a temporary file to keep the command short.
+        hidden = false, -- Hides the generation window (if true, implicitly sets `prompt.replace = true`), requires Neovim >= 0.10.
+        init = function(options) pcall(io.popen, "ollama serve > /dev/null 2>&1 &") end, -- Function to initialize Ollama.
         command = function(options)
             local body = {model = options.model, stream = true}
             return "curl --silent --no-buffer -X POST http://" .. options.host .. ":" .. options.port .. "/api/chat -d $body"
         end,
-        -- The command for the Ollama service. You can use placeholders $prompt, $model and $body (shellescaped).
+        -- The command for the Ollama service. You can use placeholders $prompt, $model, and $body (shellescaped).
         -- This can also be a command string.
         -- The executed command must return a JSON object with { response, context }
         -- (context property is optional).
-        -- list_models = '<omitted lua function>', -- Retrieves a list of model names
+        -- list_models = '<omitted lua function>', -- Retrieves a list of model names.
         debug = false -- Prints errors and the command which is run.
     }
-},
+}
 ```
 
-Here are all [available models](https://ollama.ai/library).
+### Alternative Setup
 
 Alternatively, you can call the `setup` function:
 
@@ -70,13 +66,13 @@ require('gen').setup({
 })
 ```
 
-
+Here are all [available models](https://ollama.ai/library).
 
 ## Usage
 
-Use command `Gen` to generate text based on predefined and customizable prompts.
+Use the command `Gen` to generate text based on predefined and customizable prompts.
 
-Example key maps:
+### Example Key Maps
 
 ```lua
 vim.keymap.set({ 'n', 'v' }, '<leader>]', ':Gen')
@@ -88,17 +84,17 @@ You can also directly invoke it with one of the [predefined prompts](./lua/gen/prompts.lua):
 vim.keymap.set('v', '<leader>]', ':Gen Enhance_Grammar_Spelling')
 ```
 
-Once a conversation is started, the whole context is sent to the LLM. That allows you to ask follow-up questions with
+Once a conversation is started, the whole context is sent to the LLM. This allows you to ask follow-up questions with:
 
 ```lua
 :Gen Chat
 ```
 
-and once the window is closed, you start with a fresh conversation.
+Once the window is closed, you start with a fresh conversation.
 
-For prompts which don't automatically replace the previously selected text (`replace = false`), you can replace the selected text with the generated output with `<c-cr>`.
+For prompts that don't automatically replace the previously selected text (`replace = false`), you can replace the selected text with the generated output using `<c-cr>`.
 
-You can select a model from a list of all installed models with
+You can select a model from a list of all installed models with:
 
 ```lua
 require('gen').select_model()
@@ -106,9 +102,10 @@ require('gen').select_model()
 
 ## Custom Prompts
 
-[All prompts](./lua/gen/prompts.lua) are defined in `require('gen').prompts`, you can enhance or modify them.
+[All prompts](./lua/gen/prompts.lua) are defined in `require('gen').prompts`, and you can enhance or modify them.
+
+### Example
 
-Example:
 ```lua
 require('gen').prompts['Elaborate_Text'] = {
   prompt = "Elaborate the following text:\n$text",
   replace = true
 }
@@ -121,17 +118,31 @@ require('gen').prompts['Fix_Code'] = {
 }
 ```
 
-You can use the following properties per prompt:
+### Prompt Properties
 
-- `prompt`: (string | function) Prompt either as a string or a function which should return a string. The result can use the following placeholders:
-  - `$text`: Visually selected text or the content of the current buffer
-  - `$filetype`: File type of the buffer (e.g. `javascript`)
-  - `$input`: Additional user input
-  - `$register`: Value of the unnamed register (yanked text)
-- `replace`: `true` if the selected text shall be replaced with the generated output
-- `extract`: Regular expression used to extract the generated result
-- `model`: The model to use, e.g. `zephyr`, default: `mistral`
+- `prompt`: (string | function) Prompt either as a string or a function that returns a string. The result can use the following placeholders:
+  - `$text`: Visually selected text or the content of the current buffer.
+  - `$filetype`: File type of the buffer (e.g. `javascript`).
+  - `$input`: Additional user input.
+  - `$register`: Value of the unnamed register (yanked text).
+- `replace`: `true` if the selected text should be replaced with the generated output.
+- `extract`: Regular expression used to extract the generated result.
+- `model`: The model to use, e.g. `zephyr`, default: `mistral`.
 
 ## Tip
 
 User selections can be delegated to [Telescope](https://github.com/nvim-telescope/telescope.nvim) with [telescope-ui-select](https://github.com/nvim-telescope/telescope-ui-select.nvim).
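+
+For example, a minimal setup sketch (assuming telescope.nvim and telescope-ui-select.nvim are installed; see the telescope-ui-select README for details):
+
+```lua
+-- Illustrative only: routes vim.ui.select (used by gen.nvim's pickers) through Telescope.
+require('telescope').setup({
+  extensions = {
+    ['ui-select'] = {
+      require('telescope.themes').get_dropdown({})
+    }
+  }
+})
+require('telescope').load_extension('ui-select')
+```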