return { -- { -- "olimorris/codecompanion.nvim", -- dependencies = { -- "nvim-lua/plenary.nvim", -- "nvim-treesitter/nvim-treesitter", -- }, -- config = true, -- }, -- { -- "Jacob411/Ollama-Copilot", -- opts = { -- model_name = "deepseek-coder:base", -- stream_suggestion = false, -- python_command = "python3", -- filetypes = { "python", "lua", "vim", "markdown" }, -- ollama_model_opts = { -- num_predict = 40, -- temperature = 0.1, -- }, -- keymaps = { -- suggestion = "os", -- reject = "or", -- insert_accept = "", -- }, -- }, -- }, -- { -- "TabbyML/vim-tabby", -- lazy = false, -- dependencies = { -- "neovim/nvim-lspconfig", -- }, -- init = function() -- vim.g.tabby_agent_start_command = { "npx", "tabby-agent", "--stdio" } -- vim.g.tabby_inline_completion_trigger = "auto" -- end, -- }, -- { -- "Saghen/blink.cmp", -- config = function(_, opts) -- local additions = { -- keymap = { -- -- Manually invoke minuet completion. -- [""] = require("minuet").make_blink_map(), -- }, -- sources = { -- -- Enable minuet for autocomplete -- default = { "lsp", "path", "buffer", "snippets", "minuet" }, -- -- For manual completion only, remove 'minuet' from default -- providers = { -- minuet = { -- name = "minuet", -- module = "minuet.blink", -- score_offset = 8, -- Gives minuet higher priority among suggestions -- }, -- }, -- }, -- -- Recommended to avoid unnecessary request -- completion = { trigger = { prefetch_on_insert = false } }, -- } -- -- TODO merge with users config -- local merged_with_opts = vim.tbl_deep_extend(opts, additions) -- require("blink-cmp").setup(merged_with_opts) -- end, -- }, -- { -- "Davidyz/VectorCode", -- enabled = vim.loop.os_uname().sysname == "Darwin", -- version = "*", -- optional, depending on whether you're on nightly or release -- build = "pipx upgrade vectorcode", -- optional but recommended if you set `version = "*"` -- dependencies = { "nvim-lua/plenary.nvim" }, -- }, -- { -- "huggingface/llm.nvim", -- opts = { -- backend = "ollama", -- model = "codellama:7b-instruct", -- url = "http://localhost:11434", -- llm-ls uses "/api/generate" -- -- cf https://github.com/ollama/ollama/blob/main/docs/api.md#parameters -- request_body = { -- -- Modelfile options for the model you use -- options = { -- temperature = 0.2, -- top_p = 0.95, -- }, -- }, -- }, -- }, { "zbirenbaum/copilot.lua", optional = true, opts = function() require("copilot.api").status = require("copilot.status") return { suggestion = { auto_trigger = true, debounce = 100, keymap = { accept = "", next = "", prev = "", dismiss = "", }, }, panel = { enabled = false, auto_refresh = true, keymap = { jump_next = "", jump_prev = "", accept = "", refresh = "r", open = "", }, }, } end, }, }