From d7c77e43ccf49aeb0782ff77c3c54b0a32129034 Mon Sep 17 00:00:00 2001 From: Tomas Slusny Date: Mon, 28 Jul 2025 17:05:17 +0200 Subject: [PATCH] feat(resources)!: add option to enable resource processing This adds an option to enable resource processing, disabled by default. BREAKING CHANGE: intelligent resource processing is now disabled by default; set resource_processing = true in your config to re-enable it. Signed-off-by: Tomas Slusny --- README.md | 4 +++- lua/CopilotChat/client.lua | 20 +++----------------- lua/CopilotChat/config.lua | 5 ++++- lua/CopilotChat/init.lua | 14 +++++++++----- 4 files changed, 19 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 0535af19..931ee8ab 100644 --- a/README.md +++ b/README.md @@ -445,10 +445,12 @@ Below are all available configuration options with their default values: tools = nil, -- Default tool or array of tools (or groups) to share with LLM (can be specified manually in prompt via @). sticky = nil, -- Default sticky prompt or array of sticky prompts to use at start of every new chat (can be specified manually in prompt via >). 
+ resource_processing = false, -- Enable intelligent resource processing (skips unnecessary resources to save tokens) + temperature = 0.1, -- Result temperature headless = false, -- Do not write to chat buffer and use history (useful for using custom processing) callback = nil, -- Function called when full response is received - remember_as_sticky = true, -- Remember model as sticky prompts when asking questions + remember_as_sticky = true, -- Remember config as sticky prompts when asking questions -- default selection -- see select.lua for implementation diff --git a/lua/CopilotChat/client.lua b/lua/CopilotChat/client.lua index b6460d0a..93143754 100644 --- a/lua/CopilotChat/client.lua +++ b/lua/CopilotChat/client.lua @@ -61,9 +61,7 @@ local class = utils.class --- Constants local RESOURCE_FORMAT = '# %s\n```%s\n%s\n```' local LINE_CHARACTERS = 100 -local BIG_FILE_THRESHOLD = 1000 * LINE_CHARACTERS local BIG_EMBED_THRESHOLD = 200 * LINE_CHARACTERS -local TRUNCATED = '... (truncated)' --- Resolve provider function ---@param model string @@ -103,16 +101,9 @@ end --- Generate content block with line numbers, truncating if necessary ---@param content string ----@param threshold number: The threshold for truncation ---@param start_line number?: The starting line number ---@return string -local function generate_content_block(content, threshold, start_line) - local total_chars = #content - if total_chars > threshold then - content = content:sub(1, threshold) - content = content .. '\n' .. TRUNCATED - end - +local function generate_content_block(content, start_line) if start_line ~= nil then local lines = vim.split(content, '\n') local total_lines = #lines @@ -144,12 +135,7 @@ local function generate_selection_message(selection) if selection.start_line and selection.end_line then out = out .. string.format('Excerpt from %s, lines %s to %s:\n', filename, selection.start_line, selection.end_line) end - out = out - .. 
string.format( - '```%s\n%s\n```', - filetype, - generate_content_block(content, BIG_FILE_THRESHOLD, selection.start_line) - ) + out = out .. string.format('```%s\n%s\n```', filetype, generate_content_block(content, selection.start_line)) return { content = out, @@ -167,7 +153,7 @@ local function generate_resource_messages(resources) return resource.data and resource.data ~= '' end) :map(function(resource) - local content = generate_content_block(resource.data, BIG_FILE_THRESHOLD, 1) + local content = generate_content_block(resource.data, 1) return { content = string.format(RESOURCE_FORMAT, resource.name, resource.type, content), diff --git a/lua/CopilotChat/config.lua b/lua/CopilotChat/config.lua index 5e900219..c456a5b1 100644 --- a/lua/CopilotChat/config.lua +++ b/lua/CopilotChat/config.lua @@ -17,6 +17,7 @@ ---@field model string? ---@field tools string|table|nil ---@field sticky string|table|nil +---@field resource_processing boolean? ---@field temperature number? ---@field headless boolean? ---@field callback nil|fun(response: string, source: CopilotChat.source) @@ -57,10 +58,12 @@ return { tools = nil, -- Default tool or array of tools (or groups) to share with LLM (can be specified manually in prompt via @). sticky = nil, -- Default sticky prompt or array of sticky prompts to use at start of every new chat (can be specified manually in prompt via >). 
+ resource_processing = false, -- Enable intelligent resource processing (skips unnecessary resources to save tokens) + temperature = 0.1, -- Result temperature headless = false, -- Do not write to chat buffer and use history (useful for using custom processing) callback = nil, -- Function called when full response is received - remember_as_sticky = true, -- Remember model as sticky prompts when asking questions + remember_as_sticky = true, -- Remember config as sticky prompts when asking questions -- default selection selection = require('CopilotChat.select').visual, diff --git a/lua/CopilotChat/init.lua b/lua/CopilotChat/init.lua index c4da582e..426ccad3 100644 --- a/lua/CopilotChat/init.lua +++ b/lua/CopilotChat/init.lua @@ -903,11 +903,15 @@ function M.ask(prompt, config) local ok, err = pcall(async.run, function() local selected_tools, resolved_resources, resolved_tools, prompt = M.resolve_functions(prompt, config) local selected_model, prompt = M.resolve_model(prompt, config) - local query_ok, processed_resources = pcall(resources.process_resources, prompt, selected_model, resolved_resources) - if query_ok then - resolved_resources = processed_resources - else - log.warn('Failed to process resources', processed_resources) + + if config.resource_processing then + local query_ok, processed_resources = + pcall(resources.process_resources, prompt, selected_model, resolved_resources) + if query_ok then + resolved_resources = processed_resources + else + log.warn('Failed to process resources', processed_resources) + end end prompt = vim.trim(prompt)