Currently supports: Anthropic, Ollama and OpenAI adapters
Important
This plugin is provided as-is and is primarily developed for my own workflows. As such, I offer no guarantees of regular updates or support and I expect the plugin's API to change regularly. Bug fixes and feature enhancements will be implemented at my discretion, and only if they align with my personal use-cases. Feel free to fork the project and customize it to your needs, but please understand my involvement in further development will be intermittent. To be notified of breaking changes in the plugin, please subscribe to this issue.
- 💬 A Copilot Chat experience in Neovim
- 🔌 Support for OpenAI, Anthropic and Ollama
- 🚀 Inline code creation and refactoring
- 🤖 Agents and Workflows to improve LLM output
- ✨ Built-in prompts for LSP errors and code advice
- 🏗️ Create your own custom prompts for Neovim
- 💾 Save and restore your chats
- 💪 Async execution for improved performance
[Video demo: the chat buffer (Chat.Buffer.mp4)]
[Video demo: inline coding (Inline.Coding.mp4)]
- The `curl` library installed
- Neovim 0.9.2 or greater
- (Optional) An API key for your chosen LLM
- (Optional) The `base64` library installed
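If you're unsure whether the external dependencies are available, a quick sanity check from within Neovim (this assumes `curl` and `base64` are executables on your `$PATH`):

```lua
-- 1 means the executable was found on your PATH, 0 means it wasn't
print(vim.fn.executable("curl"))   -- required
print(vim.fn.executable("base64")) -- optional
```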
Install the plugin with your package manager of choice:
```lua
-- Lazy.nvim
{
  "olimorris/codecompanion.nvim",
  dependencies = {
    "nvim-lua/plenary.nvim",
    "nvim-treesitter/nvim-treesitter",
    "nvim-telescope/telescope.nvim", -- Optional
    {
      "stevearc/dressing.nvim", -- Optional: Improves the default Neovim UI
      opts = {},
    },
  },
  config = true
}
```

```lua
-- Packer.nvim
use({
  "olimorris/codecompanion.nvim",
  config = function()
    require("codecompanion").setup()
  end,
  requires = {
    "nvim-lua/plenary.nvim",
    "nvim-treesitter/nvim-treesitter",
    "nvim-telescope/telescope.nvim", -- Optional
    "stevearc/dressing.nvim" -- Optional: Improves the default Neovim UI
  }
})
```
You only need to call the `setup` function if you wish to change any of the defaults:
Click to see the default configuration
require("codecompanion").setup({
adapters = {
anthropic = "anthropic",
ollama = "ollama",
openai = "openai",
},
strategies = {
chat = {
adapter = "openai",
-- Helpers which can be referenced in the chat buffer with the "@" symbol e.g. "@buffer"
helpers = {
["buffer"] = {
callback = "helpers.chat.buffer",
category = "buffer",
description = "Share the current buffer with the LLM",
},
["buffers"] = {
callback = "helpers.chat.buffers",
category = "buffer",
description = "Share all loaded buffers (matching the filetype) with the LLM",
},
["bufferq"] = {
callback = "helpers.chat.bufferq",
category = "buffer",
description = "Share the content of the buffers from the quickfix list",
},
},
keymaps = {
["<C-s>"] = "keymaps.save",
["<C-c>"] = "keymaps.close",
["q"] = "keymaps.stop",
["gc"] = "keymaps.clear",
["ga"] = "keymaps.codeblock",
["gs"] = "keymaps.save_chat",
["gt"] = "keymaps.add_agent",
["]"] = "keymaps.next",
["["] = "keymaps.previous",
},
},
inline = {
adapter = "openai",
keymaps = {
["gc"] = "keymaps.clear_diff",
},
prompts = {
-- The prompt to send to the LLM when a user initiates the inline strategy and it needs to convert to a chat
inline_to_chat = function(context)
return "I want you to act as an expert and senior developer in the "
.. context.filetype
.. " language. I will ask you questions, perhaps giving you code examples, and I want you to advise me with explanations and code where neccessary."
end,
},
},
agent = {
adapter = "openai",
agents = {
["code_runner"] = {
name = "Code Runner",
description = "Run code generated by the LLM",
enabled = true,
},
["rag"] = {
name = "RAG",
description = "Supplement the LLM with real-time information",
enabled = true,
},
["buffer_editor"] = {
name = "Buffer Editor",
description = "Edit code by searching and replacing blocks",
enabled = true,
},
opts = {
auto_submit_errors = false,
auto_submit_success = false,
},
},
},
},
action_prompts = {
["Custom Prompt"] = {
strategy = "inline",
description = "Send a custom prompt to the LLM",
opts = {
index = 1,
default_prompt = true,
mapping = "<LocalLeader>cc",
user_prompt = true,
},
prompts = {
{
role = "system",
tag = "system_tag",
content = function(context)
if context.buftype == "terminal" then
return "I want you to act as an expert in writing terminal commands that will work for my current shell "
.. os.getenv("SHELL")
.. ". I will ask you specific questions and I want you to return the raw command only (no codeblocks and explanations). If you can't respond with a command, respond with nothing"
end
return "I want you to act as a senior "
.. context.filetype
.. " developer. I will ask you specific questions and I want you to return raw code only (no codeblocks and no explanations). If you can't respond with code, respond with nothing"
end,
},
},
},
["Senior Developer"] = {
strategy = "chat",
name_f = function(context)
return "Senior " .. utils.capitalize(context.filetype) .. " Developer"
end,
description = function(context)
local filetype
if context and context.filetype then
filetype = utils.capitalize(context.filetype)
end
return "Chat with a senior " .. (filetype or "") .. " developer"
end,
opts = {
index = 2,
default_prompt = true,
modes = { "n", "v" },
mapping = "<LocalLeader>ce",
auto_submit = false,
stop_context_insertion = true,
},
prompts = {
{
role = "system",
content = function(context)
return "I want you to act as an expert and senior developer in the "
.. context.filetype
.. " language. I will ask you questions, perhaps giving you code examples, and I want you to advise me with explanations and code where neccessary."
end,
},
{
role = "user",
contains_code = true,
condition = function(context)
return context.is_visual
end,
content = function(context)
local text = require("codecompanion.helpers.code").get_code(context.start_line, context.end_line)
return "I have the following code:\n\n```" .. context.filetype .. "\n" .. text .. "\n```\n\n"
end,
},
{
role = "user",
condition = function(context)
return not context.is_visual
end,
content = "\n \n",
},
},
},
["Code Advisor"] = {
strategy = "chat",
description = "Get advice on the code you've selected",
opts = {
index = 3,
default_prompt = true,
mapping = "<LocalLeader>ca",
modes = { "v" },
shortcut = "advisor",
auto_submit = true,
user_prompt = true,
stop_context_insertion = true,
},
prompts = {
{
role = "system",
content = function(context)
return "I want you to act as a senior "
.. context.filetype
.. " developer. I will ask you specific questions and I want you to return concise explanations and codeblock examples."
end,
},
{
role = "user",
contains_code = true,
content = function(context)
local code = require("codecompanion.helpers.code").get_code(context.start_line, context.end_line)
return "I have the following code:\n\n```" .. context.filetype .. "\n" .. code .. "\n```\n\n"
end,
},
},
},
["Explain LSP Diagnostics"] = {
strategy = "chat",
description = "Use an LLM to explain any LSP diagnostics",
opts = {
index = 4,
default_prompt = true,
mapping = "<LocalLeader>cl",
modes = { "v" },
shortcut = "lsp",
auto_submit = true,
user_prompt = false, -- Whether to prompt the user for their own input
stop_context_insertion = true,
},
prompts = {
{
role = "system",
content = [[You are an expert coder and helpful assistant who can help debug code diagnostics, such as warning and error messages. When appropriate, give solutions with code snippets as fenced codeblocks with a language identifier to enable syntax highlighting.]],
},
{
role = "user",
content = function(context)
local diagnostics =
require("codecompanion.helpers.lsp").get_diagnostics(context.start_line, context.end_line, context.bufnr)
local concatenated_diagnostics = ""
for i, diagnostic in ipairs(diagnostics) do
concatenated_diagnostics = concatenated_diagnostics
.. i
.. ". Issue "
.. i
.. "\n - Location: Line "
.. diagnostic.line_number
.. "\n - Severity: "
.. diagnostic.severity
.. "\n - Message: "
.. diagnostic.message
.. "\n"
end
return "The programming language is "
.. context.filetype
.. ". This is a list of the diagnostic messages:\n\n"
.. concatenated_diagnostics
end,
},
{
role = "user",
contains_code = true,
content = function(context)
return "This is the code, for context:\n\n"
.. "```"
.. context.filetype
.. "\n"
.. require("codecompanion.helpers.code").get_code(
context.start_line,
context.end_line,
{ show_line_numbers = true }
)
.. "\n```\n\n"
end,
},
},
},
["Generate a Commit Message"] = {
strategy = "chat",
description = "Generate a commit message",
opts = {
index = 5,
default_prompt = true,
mapping = "<LocalLeader>cm",
shortcut = "commit",
auto_submit = true,
},
prompts = {
{
role = "user",
contains_code = true,
content = function()
return "You are an expert at following the Conventional Commit specification. Given the git diff listed below, please generate a commit message for me:"
.. "\n\n```\n"
.. vim.fn.system("git diff")
.. "\n```"
end,
},
},
},
},
display = {
action_palette = {
width = 95,
height = 10,
},
chat = {
window = {
layout = "vertical", -- float|vertical|horizontal|buffer
border = "single",
height = 0.8,
width = 0.45,
relative = "editor",
opts = {
breakindent = true,
cursorcolumn = false,
cursorline = false,
foldcolumn = "0",
linebreak = true,
list = false,
signcolumn = "no",
spell = false,
wrap = true,
},
},
intro_message = "Welcome to CodeCompanion ✨! Save the buffer to send a message...",
show_settings = true,
show_token_count = true,
},
inline = {
diff = {
enabled = true,
priority = 130,
hl_groups = {
removed = "DiffDelete",
},
},
},
},
opts = {
log_level = "ERROR",
saved_chats_dir = vim.fn.stdpath("data") .. "/codecompanion/saved_chats",
send_code = true,
silence_notifications = false,
use_default_actions = true,
use_default_prompts = true,
system_prompt = string.format(
[[You are an AI programming assistant named "CodeCompanion," built by Oli Morris. Follow the user's requirements carefully and to the letter. Your expertise is strictly limited to software development topics. Avoid content that violates copyrights. For questions not related to the general topic of software development, remind the user that you are an AI programming assistant. Keep your answers short and impersonal.
You can answer general programming questions and perform the following tasks:
- Ask questions about the files in your current workspace
- Explain how the selected code works
- Generate unit tests for the selected code
- Propose a fix for problems in the selected code
- Scaffold code for a new feature
- Ask questions about Neovim
- Ask how to do something in the terminal
First, think step-by-step and describe your plan in pseudocode, written out in great detail. Then, output the code in a single code block. Minimize any other prose. Use Markdown formatting in your answers, and include the programming language name at the start of the Markdown code blocks. Avoid wrapping the whole response in triple backticks. The user works in a text editor called Neovim and the version is %d.%d.%d. Neovim has concepts for editors with open files, integrated unit test support, an output pane for running code, and an integrated terminal. The active document is the source code the user is looking at right now. You can only give one reply for each conversation turn.
You also have access to agents that you can use to initiate actions on the user's machine:
- Code Runner: To run any code that you've generated and receive the output
- RAG: To supplement your responses with real-time information and insight
When informed by the user of an available agent, pay attention to the schema that the user provides in order to execute the agent.]],
vim.version().major,
vim.version().minor,
vim.version().patch
),
},
})
```
Adapters
Warning
Depending on your chosen adapter, you may need to set an API key.
The plugin uses adapters to act as the bridge between itself and LLMs. Currently, the plugin supports:

- Anthropic (`anthropic`) - Requires an API key
- Ollama (`ollama`)
- OpenAI (`openai`) - Requires an API key
Strategies are the different ways that a user can interact with the plugin. The chat and agent strategies harness a buffer to allow direct conversation with the LLM. The inline strategy allows for output from the LLM to be written directly into a pre-existing Neovim buffer.
To specify a different adapter to the defaults, simply change the `strategies.*` table:
require("codecompanion").setup({
strategies = {
chat = {
adapter = "ollama",
},
inline = {
adapter = "ollama",
},
agent = {
adapter = "anthropic",
},
},
})
Tip
To create your own adapter please refer to the ADAPTERS guide.
Configuring environment variables
You can customise an adapter's configuration as follows:
require("codecompanion").setup({
adapters = {
anthropic = require("codecompanion.adapters").use("anthropic", {
env = {
api_key = "ANTHROPIC_API_KEY_1"
},
}),
},
strategies = {
chat = {
adapter = "anthropic",
},
},
})
In the example above, we've changed the name of the default API key which the Anthropic adapter uses.
Having API keys in plain text in your shell is not always safe. Thanks to this PR, you can run commands from within the configuration:
require("codecompanion").setup({
adapters = {
openai = require("codecompanion.adapters").use("openai", {
env = {
api_key = "cmd:op read op://personal/OpenAI/credential --no-newline",
},
}),
strategies = {
chat = {
adapter = "openai",
},
},
},
})
In this example, we're using the 1Password CLI to read an OpenAI credential.
Configuring adapter settings
LLMs have many settings such as model, temperature and max_tokens. In an adapter, these sit within a schema table and can be configured during setup:
require("codecompanion").setup({
adapters = {
anthropic = require("codecompanion.adapters").use("anthropic", {
schema = {
model = {
default = "claude-3-sonnet-20240229",
},
},
}),
},
})
Tip
Refer to your chosen adapter to see the settings available.
Highlight Groups
The plugin sets the following highlight groups during setup:
- `CodeCompanionTokens` - Virtual text in the chat buffer showing the token count
- `CodeCompanionVirtualText` - All other virtual text in the chat buffer
- `CodeCompanionVirtualTextAgents` - Virtual text in the chat buffer for when an agent is running
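These can be restyled like any other highlight group. A minimal sketch using the standard Neovim highlight API; the groups linked to here are just an example:

```lua
-- Link the plugin's highlight groups to groups from your colorscheme
vim.api.nvim_set_hl(0, "CodeCompanionTokens", { link = "Comment" })
vim.api.nvim_set_hl(0, "CodeCompanionVirtualText", { link = "NonText" })
vim.api.nvim_set_hl(0, "CodeCompanionVirtualTextAgents", { link = "DiagnosticInfo" })
```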
Inline Prompting
[Video demo: inline prompting (Inline.Prompting.mp4)]
To start interacting with the plugin you can run `:CodeCompanion <your prompt>` from the command line. You can also make a visual selection in Neovim and run `:'<,'>CodeCompanion <your prompt>` to send it as context. A command such as `:'<,'>CodeCompanion what does this code do?` will prompt the LLM to respond in a chat buffer, allowing you to ask any follow-up questions. Whereas a command such as `:CodeCompanion can you create a function that outputs the current date and time` would result in the output being placed at the cursor's position in the buffer.
In the video, you'll notice that we're triggering a pre-defined LSP warnings prompt by running `:'<,'>CodeCompanion @lsp`. You can find more on this in the default prompts section.
Note
Any open and loaded buffers can be sent to the LLM as context with the `@buffers` keyword, assuming they match the current filetype. For example: `:'<,'>CodeCompanion @buffers <your prompt>`. The `@buffer` keyword is also available for sending the current open buffer to the LLM.
Chat Buffer
The chat buffer is where you'll likely spend most of your time when interacting with the plugin. Running `:CodeCompanionChat` or `:'<,'>CodeCompanionChat` will open up a chat buffer where you can converse directly with an LLM. As a convenience, you can use `:CodeCompanionToggle` to toggle the visibility of a chat buffer.
When in the chat buffer, you can leverage helpers via the use of keywords, anywhere under the `user` heading:

- `@buffer` - To include the contents of the buffer you initiated the chat from
- `@buffers` - To include the contents of all loaded buffers that match the current filetype
There are also many keymaps you can leverage in the chat buffer which are covered in the chat buffer section of this readme.
Action Palette
The `:CodeCompanionActions` command will open the Action Palette, giving you access to all of the functionality in the plugin. The Prompts section is where your custom prompts and the pre-defined ones can be accessed. You'll notice that some prompts have a label in their description, such as `@commit`. This enables you to trigger them from the command line by doing `:CodeCompanion @commit`. Some of these prompts also have keymaps assigned to them (which can be overwritten!), which offers an even easier route to triggering them.
Note
Some actions will only be visible in the Action Palette if you're in Visual mode.
List of commands
Below is the full list of commands that are available in the plugin:
- `CodeCompanionActions` - To open the Action Palette
- `CodeCompanion` - Inline prompting of the plugin
- `CodeCompanion <shortcut>` - Inline prompting of the plugin with a shortcut, e.g. `@commit`
- `CodeCompanionChat` - To open up a new chat buffer
- `CodeCompanionChat <adapter>` - To open up a new chat buffer with a specific adapter
- `CodeCompanionToggle` - To toggle a chat buffer
- `CodeCompanionAdd` - To add visually selected chat to the current chat buffer
Suggested workflow
For an optimum workflow, I recommend the following options:
vim.api.nvim_set_keymap("n", "<C-a>", "<cmd>CodeCompanionActions<cr>", { noremap = true, silent = true })
vim.api.nvim_set_keymap("v", "<C-a>", "<cmd>CodeCompanionActions<cr>", { noremap = true, silent = true })
vim.api.nvim_set_keymap("n", "<LocalLeader>a", "<cmd>CodeCompanionToggle<cr>", { noremap = true, silent = true })
vim.api.nvim_set_keymap("v", "<LocalLeader>a", "<cmd>CodeCompanionToggle<cr>", { noremap = true, silent = true })
vim.api.nvim_set_keymap("v", "ga", "<cmd>CodeCompanionAdd<cr>", { noremap = true, silent = true })
-- Expand 'cc' into 'CodeCompanion' in the command line
vim.cmd([[cab cc CodeCompanion]])
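If you manage your plugins with lazy.nvim, the same mappings can be declared on the plugin spec via its `keys` field instead. A sketch, using the mappings recommended above:

```lua
{
  "olimorris/codecompanion.nvim",
  keys = {
    { "<C-a>", "<cmd>CodeCompanionActions<cr>", mode = { "n", "v" }, desc = "CodeCompanion actions" },
    { "<LocalLeader>a", "<cmd>CodeCompanionToggle<cr>", mode = { "n", "v" }, desc = "Toggle CodeCompanion chat" },
    { "ga", "<cmd>CodeCompanionAdd<cr>", mode = "v", desc = "Add selection to CodeCompanion chat" },
  },
}
```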
A RECIPES guide has been created to show you how you can add your own prompts to the Action Palette.
The chat buffer is where you can converse with an LLM, directly from Neovim. It behaves as a regular markdown buffer with some clever additions. When the buffer is written (or "saved"), autocmds trigger the sending of its content to the LLM in the form of prompts. These prompts are segmented by H1 headers: `user`, `system` and `assistant`. When a response is received, it is streamed back into the buffer. The result is that you experience the feel of conversing with your LLM from within Neovim.
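To illustrate, a chat buffer might look something like this (an illustrative sketch only; the exact layout depends on your settings, e.g. `show_settings`):

```markdown
# system

I want you to act as a senior Lua developer...

# user

How can I make this function faster? @buffer

# assistant

(the LLM's response is streamed in here)
```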
Keymaps
When in the chat buffer, there are a number of keymaps available to you:

- `<C-s>` - Save the buffer and trigger a response from the LLM
- `<C-c>` - Close the buffer
- `q` - Cancel the stream from the LLM
- `gc` - Clear the buffer's contents
- `ga` - Add a codeblock
- `gs` - Save the chat to disk
- `gt` - Add an agent to an existing chat
- `]` - Move to the next header
- `[` - Move to the previous header
Saved Chats
Chat buffers are not saved to disk by default, but can be by pressing `gs` in the buffer. Saved chats can then be restored via the Action Palette and the Load saved chats action.
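The directory that chats are saved to is controlled by `opts.saved_chats_dir`, shown here with its default value:

```lua
require("codecompanion").setup({
  opts = {
    saved_chats_dir = vim.fn.stdpath("data") .. "/codecompanion/saved_chats",
  },
})
```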
Settings
If `display.chat.show_settings` is set to `true`, the adapter's model parameters will appear at the very top of the chat buffer and can be changed to tweak the response. You can find more detail about them by moving the cursor over them.
Open Chats
From the Action Palette, the Open Chats action enables users to easily navigate between their open chat buffers. A chat buffer can be deleted (and removed from memory) by pressing `<C-c>`.
Note
If `send_code = false` then this will take precedence and no buffers will be sent to the LLM.
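For reference, `send_code` is a top-level option:

```lua
require("codecompanion").setup({
  opts = {
    send_code = false, -- Never send code (or buffers) to the LLM
  },
})
```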
Inline prompts can be triggered via the `:CodeCompanion <your prompt>` command. As mentioned in the Getting Started guide, you can also leverage visual selections and prompt shortcuts, like `:'<,'>CodeCompanion @lsp`.
One of the challenges with inline editing is determining how the LLM's response should be handled in the buffer. If you've prompted the LLM to "create a table of 5 common text editors" then you may wish for the response to be placed after the cursor's current position in the buffer. However, if you asked the LLM to "refactor this function" then you'd expect the response to overwrite a visual selection. The plugin will use the inline LLM you've specified to determine if the response should follow any of the placements below:
- after - after the visual selection/cursor
- before - before the visual selection/cursor
- new - in a new buffer
- replace - replacing the visual selection
- chat - in a chat buffer
Note
Please see the RECIPES guide in order to add your own pre-defined prompts to the palette.
The plugin comes with a number of default prompts and corresponding keymaps/shortcuts:
- Custom Prompt - For custom inline prompting of an LLM (`<LocalLeader>cc`)
- Senior Developer - Chat with a senior developer for the given filetype (`<LocalLeader>ce`)
- Generate a Commit Message - Use an LLM to write a commit message for you (`<LocalLeader>cm` / `@commit`)
- Code Advisor - Get advice from an LLM on code you've selected (`<LocalLeader>ca` / `@advisor`)
- Explain LSP Diagnostics - Use an LLM to explain LSP diagnostics for code you've selected (`<LocalLeader>cl` / `@lsp`)

Shortcuts can be accessed via the command line by typing, for example, `:CodeCompanion @commit`.
[Video demo: agents (Tools.mp4)]
Important
Agents are currently at an alpha stage. I've yet to properly battle-test them, so feedback is much appreciated.
As outlined by Andrew Ng in Agentic Design Patterns Part 3, Tool Use, LLMs can act as agents by leveraging external tools. Andrew notes some common examples such as web searching or code execution that have obvious benefits when using LLMs.
In this plugin, agents are simply context that's given to an LLM via a `system` prompt. This gives it knowledge and a defined schema which it can include in its response for the plugin to parse, execute and feed back on. Agents can be leveraged by opening up the Action Palette and choosing the Agents option. Or, agents can be added to an existing chat buffer via the `gt` keymap.
Agent types
Currently, there are two types of agent that are supported in the plugin:
- Command - These agents execute a series of shell commands or external scripts.
- Function - These agents perform actions directly within Neovim, interacting closely with buffers and the editor environment.
Built-in Agents
- Code Runner - A command-based agent that runs code generated by the LLM using Docker.
- RAG (Retrieval-Augmented Generation) - A command-based agent that supplements the LLM with real-time information.
- Buffer Editor - A function-based agent that edits code by searching and replacing blocks directly within Neovim buffers. This agent showcases a new, more flexible approach to agent implementation, allowing for complex operations that interact closely with the editor.
More information on how agents work and how you can create your own can be found in the AGENTS guide.
Warning
Workflows may result in significant token consumption if you're using an external LLM.
As outlined by Andrew Ng, agentic workflows have the ability to dramatically improve the output of an LLM. In fact, it's possible for older models like GPT-3.5 to outperform newer models (using traditional zero-shot inference). Andrew discussed how an agentic workflow can be utilised via multiple prompts that invoke the LLM to self-reflect. Implementing Andrew's advice, the plugin supports this notion via the use of workflows. At various stages of a pre-defined workflow, the plugin will automatically prompt the LLM without any input or triggering required from the user.
Currently, the plugin comes with the following workflows:
- Adding a new feature
- Refactoring code
Of course you can add new workflows by following the RECIPES guide.
Hooks / User events
The plugin fires the following events during its lifecycle:
- `CodeCompanionRequest` - Fired during the API request. Outputs `data.status` with a value of `started` or `finished`
- `CodeCompanionChatSaved` - Fired after a chat has been saved to disk
- `CodeCompanionChat` - Fired at various points during the chat buffer. Comes with the following attributes:
  - `data.action = hide_buffer` - For when a chat buffer is hidden
- `CodeCompanionInline` - Fired during the inline API request alongside `CodeCompanionRequest`. Outputs `data.status` with a value of `started` or `finished`, and `data.placement` with the placement of the text from the LLM
- `CodeCompanionAgent` - Fired when an agent is running. Outputs `data.status` with a value of `started`, or `success`/`failure`
Events can be hooked into as follows:
```lua
local group = vim.api.nvim_create_augroup("CodeCompanionHooks", {})

vim.api.nvim_create_autocmd({ "User" }, {
  pattern = "CodeCompanionInline",
  group = group,
  callback = function(args)
    if args.data.status == "finished" then
      -- Format the buffer after the inline request has completed
      require("conform").format({ bufnr = args.buf })
    end
  end,
})
```
Statuslines
You can incorporate a visual indicator into your Neovim configuration to show when the plugin is communicating with an LLM. Below are examples for two popular statusline plugins.
lualine.nvim:
```lua
local M = require("lualine.component"):extend()

M.processing = false
M.spinner_index = 1

local spinner_symbols = {
  "⠋",
  "⠙",
  "⠹",
  "⠸",
  "⠼",
  "⠴",
  "⠦",
  "⠧",
  "⠇",
  "⠏",
}
local spinner_symbols_len = 10

-- Initializer
function M:init(options)
  M.super.init(self, options)

  local group = vim.api.nvim_create_augroup("CodeCompanionHooks", {})

  vim.api.nvim_create_autocmd({ "User" }, {
    pattern = "CodeCompanionRequest",
    group = group,
    callback = function(request)
      self.processing = (request.data.status == "started")
    end,
  })
end

-- Function that runs every time statusline is updated
function M:update_status()
  if self.processing then
    self.spinner_index = (self.spinner_index % spinner_symbols_len) + 1
    return spinner_symbols[self.spinner_index]
  else
    return nil
  end
end

return M
```
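To use the component, save it to a module on your `runtimepath` and reference it from your lualine sections. The module name below is hypothetical; adjust it to wherever you saved the file:

```lua
require("lualine").setup({
  sections = {
    -- "codecompanion_spinner" is a placeholder name for the component defined above
    lualine_x = { require("codecompanion_spinner") },
  },
})
```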
heirline.nvim:
```lua
local CodeCompanion = {
  static = {
    processing = false,
  },
  update = {
    "User",
    pattern = "CodeCompanionRequest",
    callback = function(self, args)
      self.processing = (args.data.status == "started")
      vim.cmd("redrawstatus")
    end,
  },
  {
    condition = function(self)
      return self.processing
    end,
    provider = " ",
    hl = { fg = "yellow" },
  },
}
```
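The `CodeCompanion` table can then be dropped into your statusline like any other heirline component. A minimal sketch, assuming the table above is in scope:

```lua
require("heirline").setup({
  statusline = {
    -- ...your other statusline components
    CodeCompanion,
  },
})
```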
Legendary.nvim
The plugin also supports the amazing legendary.nvim plugin. Simply enable it in your config:
```lua
require('legendary').setup({
  extensions = {
    codecompanion = true,
  },
})
```
I am open to contributions but they will be implemented at my discretion. Feel free to open up a discussion before embarking on a big PR and please make sure you've read the CONTRIBUTING.md guide.
- Steven Arcangeli for his genius creation of the chat buffer and his feedback
- Wtf.nvim for the LSP assistant action
- ChatGPT.nvim for the calculation of tokens