return {
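  -- The commented specs below are disabled experiments with alternative AI
  -- completion backends, kept for reference.

  -- codecompanion.nvim: chat-driven AI coding assistant with pluggable LLM
  -- adapters.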
  -- {
  --   "olimorris/codecompanion.nvim",
  --   dependencies = {
  --     "nvim-lua/plenary.nvim",
  --     "nvim-treesitter/nvim-treesitter",
  --   },
  --   config = true,
  -- },
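  -- Ollama-Copilot: inline suggestions from a local Ollama model (here
  -- deepseek-coder:base).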
  -- {
  --   "Jacob411/Ollama-Copilot",
  --   opts = {
  --     model_name = "deepseek-coder:base",
  --     stream_suggestion = false,
  --     python_command = "python3",
  --     filetypes = { "python", "lua", "vim", "markdown" },
  --     ollama_model_opts = {
  --       num_predict = 40,
  --       temperature = 0.1,
  --     },
  --     keymaps = {
  --       suggestion = "<leader>os",
  --       reject = "<leader>or",
  --       insert_accept = "<Tab>",
  --     },
  --   },
  -- },
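  -- vim-tabby: completions from a self-hosted Tabby server, started here as
  -- tabby-agent over stdio.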
  -- {
  --   "TabbyML/vim-tabby",
  --   lazy = false,
  --   dependencies = {
  --     "neovim/nvim-lspconfig",
  --   },
  --   init = function()
  --     vim.g.tabby_agent_start_command = { "npx", "tabby-agent", "--stdio" }
  --     vim.g.tabby_inline_completion_trigger = "auto"
  --   end,
  -- },
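  -- blink.cmp: registers minuet as an additional completion source and maps
  -- <A-k> to trigger it manually.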
  -- {
  --   "Saghen/blink.cmp",
  --   config = function(_, opts)
  --     local additions = {
  --       keymap = {
  --         -- Manually invoke minuet completion.
  --         ["<A-k>"] = require("minuet").make_blink_map(),
  --       },
  --       sources = {
  --         -- Enable minuet for autocomplete.
  --         default = { "lsp", "path", "buffer", "snippets", "minuet" },
  --         -- For manual completion only, remove 'minuet' from default.
  --         providers = {
  --           minuet = {
  --             name = "minuet",
  --             module = "minuet.blink",
  --             score_offset = 8, -- Gives minuet higher priority among suggestions.
  --           },
  --         },
  --       },
  --       -- Recommended to avoid unnecessary requests.
  --       completion = { trigger = { prefetch_on_insert = false } },
  --     }
  --     -- Merge additions over the user's opts ("force" lets additions win).
  --     local merged_with_opts = vim.tbl_deep_extend("force", opts, additions)
  --     require("blink-cmp").setup(merged_with_opts)
  --   end,
  -- },
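  -- VectorCode: indexes the repository so LLM tools can pull in project
  -- context; relies on the external `vectorcode` CLI installed via pipx.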
  -- {
  --   "Davidyz/VectorCode",
  --   enabled = vim.loop.os_uname().sysname == "Darwin",
  --   version = "*", -- optional, depending on whether you're on nightly or release
  --   build = "pipx upgrade vectorcode", -- optional but recommended if you set `version = "*"`
  --   dependencies = { "nvim-lua/plenary.nvim" },
  -- },
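  -- llm.nvim: ghost-text completions via llm-ls, pointed at a local Ollama
  -- server.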
  -- {
  --   "huggingface/llm.nvim",
  --   opts = {
  --     backend = "ollama",
  --     model = "codellama:7b-instruct",
  --     url = "http://localhost:11434", -- llm-ls uses "/api/generate"
  --     -- cf. https://github.com/ollama/ollama/blob/main/docs/api.md#parameters
  --     request_body = {
  --       -- Modelfile options for the model you use.
  --       options = {
  --         temperature = 0.2,
  --         top_p = 0.95,
  --       },
  --     },
  --   },
  -- },
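  -- Active spec: override copilot.lua's suggestion and panel keymaps.
  -- `optional = true` means this only applies when copilot.lua is enabled
  -- by another spec.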
  {
    "zbirenbaum/copilot.lua",
    optional = true,
    opts = function()
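      -- Compatibility shim: recent copilot.lua versions expose status via
      -- require("copilot.status") rather than copilot.api; mirror it back
      -- for integrations that still read copilot.api.status.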
require("copilot.api").status = require("copilot.status")
      return {
        suggestion = {
          auto_trigger = true,
          debounce = 100,
          keymap = {
            accept = "<C-e>",
            next = "<C-n>",
            prev = "<C-p>",
            dismiss = "<C-d>",
          },
        },
        panel = {
          enabled = false,
          auto_refresh = true,
          keymap = {
            jump_next = "<C-n>",
            jump_prev = "<C-p>",
            accept = "<C-e>",
            refresh = "r",
            open = "<M-p>",
          },
        },
      }
    end,
  },
}