diff --git a/.gitignore b/.gitignore index f1be53f..a86ef55 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ test-space/ .DS_Store silverbullet-ai.plug.js docs/_public +docs/Library/Core !docs/_plug SECRETS.md cov_profile diff --git a/docs/Changelog.md b/docs/Changelog.md index 08ffff8..effa89f 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -3,11 +3,19 @@ For the full changelog, please refer to the individual release notes on https:// This page is a brief overview of each version. --- -## Unreleased +## 0.4.0 (Unreleased) - Use a separate queue for indexing embeddings and summaries, to prevent blocking the main SB indexing thread - Refactor to use JSR for most Silverbullet imports, and lots of related changes - Reduced bundle size - Add support for [space-config](https://silverbullet.md/Space%20Config) +- Add support for [[Templated Prompts|Post Processor]] functions in [[Templated Prompts]]. +- AICore Library: Updated all library files to have the meta tag. +- AICore Library: Add space-script functions to be used as post processors: + - **indentOneLevel** - Indent entire response one level deeper than the previous line. + - **removeDuplicateStart** - Remove the first line from the response if it matches the line before the response started. + - **convertToBulletList** - Convert response to a markdown list. + - **convertToTaskList** - Convert response to a markdown list of tasks. +- AICore Library: Add `aiSplitTodo` slash command and [[^Library/AICore/AIPrompt/AI Split Task]] templated prompt to split a task into smaller subtasks. 
--- ## 0.3.2 diff --git a/docs/Library/AICore/AIPrompt/AI Create Space Script.md b/docs/Library/AICore/AIPrompt/AI Create Space Script.md index d4e0174..d5b6a9f 100644 --- a/docs/Library/AICore/AIPrompt/AI Create Space Script.md +++ b/docs/Library/AICore/AIPrompt/AI Create Space Script.md @@ -12,7 +12,7 @@ aiprompt: SilverBullet space script documentation: -[[!silverbullet.md/Space%20Script]] +[Space%20Script](https://silverbullet.md/Space%20Script) Using the above documentation, please create a space-script following the users description in the note below. Output only valid markdown with a code block using space-script. No explanations, code in a markdown space-script block only. Must contain **silverbullet.registerFunction** or **silverbullet.registerCommand**. Use syscalls where available, but only if you know for sure they exist. diff --git a/docs/Library/AICore/AIPrompt/AI Split Task.md b/docs/Library/AICore/AIPrompt/AI Split Task.md new file mode 100644 index 0000000..3e6f915 --- /dev/null +++ b/docs/Library/AICore/AIPrompt/AI Split Task.md @@ -0,0 +1,32 @@ +--- +tags: +- template +- aiPrompt +- meta + +description: "Split current todo into smaller manageable chunks." +aiprompt: + description: "Split current todo into smaller manageable chunks." + slashCommand: aiSplitTodo + chat: true + enrichMessages: true + insertAt: new-line-below + postProcessors: + - convertToBulletList + - convertToTaskList + - removeDuplicateStart + - indentOneLevel +--- + +**user**: [enrich:false] I’ll provide the note contents, and instructions. +**assistant**: What is the note title? +**user**: [enrich:true] {{@page.name}} +**assistant**: What are the note contents? +**user**: [enrich:true] +{{@currentPageText}} +**assistant**: What is the parent item the user is looking at? +**user**: [enrich:true] {{@parentItemText}} +**assistant**: What is the current item the user is looking at? Include the parent task if appropriate. 
+**user**: [enrich:true] {{@currentItemText}} +**assistant**: What are the instructions? +**user**: [enrich:false] Split the current task into smaller, more manageable, and well-defined tasks. Return one task per line. Keep the list of new tasks small. DO NOT return any existing items. \ No newline at end of file diff --git a/docs/Library/AICore/New Page/AI New Chat.md b/docs/Library/AICore/New Page/AI New Chat.md index 72bd08b..8e39db0 100644 --- a/docs/Library/AICore/New Page/AI New Chat.md +++ b/docs/Library/AICore/New Page/AI New Chat.md @@ -15,4 +15,4 @@ frontmatter: **assistant**: Hello, how can I help you? -**user**: |^| +**user**: |^| \ No newline at end of file diff --git a/docs/Library/AICore/Space Script/AI Query LLM.md b/docs/Library/AICore/Space Script/AI Query LLM.md index 5e430e4..1cfc282 100644 --- a/docs/Library/AICore/Space Script/AI Query LLM.md +++ b/docs/Library/AICore/Space Script/AI Query LLM.md @@ -1,6 +1,7 @@ --- tags: - spacescript +- meta description: > This space script allows you to use `{{queryAI(userPrompt, systemPrompt)}}` inside of a template. diff --git a/docs/Library/AICore/Space Script/AI Search Embeddings.md b/docs/Library/AICore/Space Script/AI Search Embeddings.md index f52504c..36cc9e5 100644 --- a/docs/Library/AICore/Space Script/AI Search Embeddings.md +++ b/docs/Library/AICore/Space Script/AI Search Embeddings.md @@ -1,6 +1,7 @@ --- tags: - spacescript +- meta description: > This space script allows you to use `{{searchEmbeddings(query)}}` inside of a template. A string diff --git a/docs/Library/AICore/Space Script/Convert to bullets.md b/docs/Library/AICore/Space Script/Convert to bullets.md new file mode 100644 index 0000000..5094d0e --- /dev/null +++ b/docs/Library/AICore/Space Script/Convert to bullets.md @@ -0,0 +1,34 @@ +--- +tags: +- spacescript +- meta + +description: > + This space script takes a string and converts each line to a bullet item in a list, if it is not already. 
+--- + + +```space-script +silverbullet.registerFunction({ name: "convertToBulletList" }, async (data) => { + const { response, lineBefore, lineAfter } = data; + const lines = response.split('\n'); + + // Get the indentation level of the line before + const indentationMatch = lineBefore.match(/^\s*/); + const indentation = indentationMatch ? indentationMatch[0] : ''; + + const bulletLines = lines.map(line => { + // Trim the line and add the indentation back + const trimmedLine = `${indentation}${line.trim()}`; + + // Add a bullet if the line doesn't already start with one + if (!trimmedLine.trim().startsWith('- ')) { + return `- ${trimmedLine.trim()}`; + } + return trimmedLine; + }); + + const result = bulletLines.join('\n'); + return result; +}); +``` \ No newline at end of file diff --git a/docs/Library/AICore/Space Script/Convert to task list.md b/docs/Library/AICore/Space Script/Convert to task list.md new file mode 100644 index 0000000..b2ba0ab --- /dev/null +++ b/docs/Library/AICore/Space Script/Convert to task list.md @@ -0,0 +1,28 @@ +--- +tags: +- spacescript +- meta + +description: > + This space script takes a string, and makes sure each line is a markdown task. 
+--- + +```space-script +silverbullet.registerFunction({ name: "convertToTaskList" }, async (data) => { + const { response } = data; + const lines = response.split('\n'); + const result = lines.map(line => { + if (/^\s*-\s*\[\s*[xX]?\s*\]/.test(line)) { + // Already a task + return line.trim(); + } + if (/^\s*-/.test(line)) { + // bullet, but not a task + return `- [ ] ${line.slice(1).trim()}`; + } + // everything else, should be a non list item + return `- [ ] ${line.trim()}`; + }).join('\n'); + return result; +}); +``` \ No newline at end of file diff --git a/docs/Library/AICore/Space Script/Indent lines.md b/docs/Library/AICore/Space Script/Indent lines.md new file mode 100644 index 0000000..3f14ef4 --- /dev/null +++ b/docs/Library/AICore/Space Script/Indent lines.md @@ -0,0 +1,36 @@ +--- +tags: +- spacescript +- meta + +description: > + This space script takes a string and indents each line one level, compared to the lineBefore. +--- + +```space-script +silverbullet.registerFunction({ name: "indentOneLevel" }, async (data) => { + const { response, lineBefore, lineCurrent } = data; + console.log(data); + + // Function to determine the indentation of a line + const getIndentation = (line) => line.match(/^\s*/)[0]; + + // Determine the maximum indentation of lineBefore and lineCurrent + const maxIndentation = getIndentation(lineBefore).length > getIndentation(lineCurrent).length + ? 
getIndentation(lineBefore) + : getIndentation(lineCurrent); + + // Define additional indentation level + const additionalIndentation = ' '; + + // Compute new indentation + const newIndentation = maxIndentation + additionalIndentation; + + // Apply new indentation to all lines in the response + const indentedLines = response.split('\n').map(line => `${newIndentation}${line.trim()}`).join('\n'); + + console.log("indentedLines:", indentedLines); + + return indentedLines; +}); +``` \ No newline at end of file diff --git a/docs/Library/AICore/Space Script/Remove Duplicate Start.md b/docs/Library/AICore/Space Script/Remove Duplicate Start.md new file mode 100644 index 0000000..e9b8252 --- /dev/null +++ b/docs/Library/AICore/Space Script/Remove Duplicate Start.md @@ -0,0 +1,25 @@ +--- +tags: +- spacescript +- meta + +description: > + This space script checks lineBefore against the first line of the response and deletes it if it's a duplicate. +--- + + +```space-script +silverbullet.registerFunction({ name: "removeDuplicateStart" }, async (data) => { + console.log(data); + const { response, lineBefore, lineCurrent } = data; + const lines = response.split('\n'); + + // Check if the first line matches either the previous or current line, and remove it if it does + if ((lines[0].trim() == lineBefore.trim()) || (lines[0].trim() == lineCurrent.trim())) { + lines.shift(); + } + console.log(lines); + + return lines.join('\n'); +}); +``` \ No newline at end of file diff --git a/docs/Templated Prompts.md b/docs/Templated Prompts.md index 6e42b09..04af487 100644 --- a/docs/Templated Prompts.md +++ b/docs/Templated Prompts.md @@ -16,7 +16,7 @@ To be a templated prompt, the note must have the following frontmatter: - Optionally, `aiprompt.systemPrompt` can be specified to override the system prompt - Optionally, `aiprompt.chat` can be specified to treat the template as a multi-turn chat instead of single message - Optionally, `aiprompt.enrichMessages` can be set to true to enrich each 
chat message -- +- Optionally, `aiprompt.postProcessors` can be set to a list of space-script function names to manipulate text returned by the llm For example, here is a templated prompt to summarize the current note and insert the summary at the cursor: @@ -65,6 +65,23 @@ Everything below is the content of the note: {{readPage(@page.ref)}} ``` + +## Template Metadata + +As of version 0.4.0, the following global metadata is available for use inside of an aiPrompt template: + +* **`page`**: Metadata about the current page. +* **`currentItemBounds`**: Start and end positions of the current item. An item may be a bullet point or task. +* **`currentItemText`**: Full text of the current item. +* **`currentLineNumber`**: Line number of the current cursor position. +* **`lineStartPos`**: Starting character position of the current line. +* **`lineEndPos`**: Ending character position of the current line. +* **`currentPageText`**: Entire text of the current page. +* **`parentItemBounds`**: Start and end positions of the parent item. +* **`parentItemText`**: Full text of the parent item. A parent item may contain child items. + +All of these can be accessed by prefixing the variable name with `@`, like `@lineEndPos` or `@currentLineNumber`. + ## Chat-style prompts As of version 0.3.0, `aiprompt.chat` can be set to true in the template frontmatter to treat the template similar to a page using [[Commands/AI: Chat on current page]]. @@ -96,4 +113,61 @@ Everything below is the content of the note: These messages will be parsed into multiple chat messages when calling the LLM’s api. Only the response from the LLM will be included in the note where the template is triggered from. -The `enrich` attribute can also be toggled on or off per message. By default it is either disabled or goes off of the `aiPrompt.enrichMessages` frontmatter attribute. Assistant and system messages are never enriched. 
\ No newline at end of file +The `enrich` attribute can also be toggled on or off per message. By default it is either disabled or goes off of the `aiPrompt.enrichMessages` frontmatter attribute. Assistant and system messages are never enriched. + +## Post Processors + +As of version 0.4.0, `aiPrompt.postProcessors` can be set to a list of space-script function names like in the example below. Once the LLM finishes streaming its response, the entire response will be sent to each post processor function in order. + +Each function must accept a single data parameter. Currently, the parameter follows this typing: + +```javascript +export type PostProcessorData = { + // The full response text + response: string; + // The line before where the response was inserted + lineBefore: string; + // The line after where the response was inserted + lineAfter: string; + // The line where the cursor was before the response was inserted + lineCurrent: string; +}; +``` + +A simple post processing function looks like this: + +```javascript +silverbullet.registerFunction({ name: "aiFooBar" }, async (data) => { + + // Extract variables from PostProcessorData + const { response, lineBefore, lineCurrent, lineAfter } = data; + + // Put the current response between FOO and BAR and return it + const newResponse = `FOO ${response} BAR`; + return newResponse; +}); +``` + +This function could be used in a template prompt like this: + +```yaml +--- +tags: +- template +- aiPrompt +- meta + +description: "Generate a random pet name" +aiprompt: + description: "Generate a random pet name." + slashCommand: aiGeneratePetName + insertAt: cursor + postProcessors: + - aiFooBar +--- + +Generate a random name for a pet. Only generate a single name. Return nothing but that name. +``` + +Running this prompt, the LLM may return `Henry` as the name and then aiFooBar will transform it into `FOO Henry BAR` which is what will ultimately be placed in the note the template was executed from. 
+ diff --git a/scripts/create-test-space.sh b/scripts/create-test-space.sh index 13ebdd9..5f2f20d 100755 --- a/scripts/create-test-space.sh +++ b/scripts/create-test-space.sh @@ -12,6 +12,10 @@ cd "$spacedir"/_plug ln -sv ../../silverbullet-ai.plug.js* . cd - +cd "$spacedir" +ln -sv ../docs/Library . +cd - + # This is a local file outside of the sbai directory cp -v ../test-spaces/SECRETS.md "$spacedir"/ cp -v ../test-spaces/SETTINGS.md "$spacedir"/ \ No newline at end of file diff --git a/src/editorUtils.ts b/src/editorUtils.ts index 33d5c52..fdea184 100644 --- a/src/editorUtils.ts +++ b/src/editorUtils.ts @@ -1,6 +1,6 @@ import { editor } from "@silverbulletmd/silverbullet/syscalls"; -async function getSelectedText() { +export async function getSelectedText() { const selectedRange = await editor.getSelection(); let selectedText = ""; if (selectedRange.from === selectedRange.to) { @@ -17,7 +17,7 @@ async function getSelectedText() { }; } -async function getSelectedTextOrNote() { +export async function getSelectedTextOrNote() { const selectedTextInfo = await getSelectedText(); const pageText = await editor.getText(); if (selectedTextInfo.text === "") { @@ -36,9 +36,42 @@ async function getSelectedTextOrNote() { }; } -async function getPageLength() { +export async function getPageLength() { const pageText = await editor.getText(); return pageText.length; } -export { getPageLength, getSelectedText, getSelectedTextOrNote }; +export function getLineNumberAtPos(text: string, pos: number): number { + const lines = text.split("\n"); + let currentPos = 0; + for (let i = 0; i < lines.length; i++) { + if (currentPos <= pos && pos < currentPos + lines[i].length + 1) { + return i; + } + currentPos += lines[i].length + 1; // +1 for the newline character + } + return -1; +} + +export function getLine(text: string, lineNumber: number): string { + const lines = text.split("\n"); + if (lineNumber < 0 || lineNumber >= lines.length) { + return ""; + } + return lines[lineNumber]; +} + 
+export function getLineOfPos(text: string, pos: number): string { + const lineNumber = getLineNumberAtPos(text, pos); + return getLine(text, lineNumber); +} + +export function getLineBefore(text: string, pos: number): string { + const lineNumber = getLineNumberAtPos(text, pos); + return getLine(text, lineNumber - 1); +} + +export function getLineAfter(text: string, pos: number): string { + const lineNumber = getLineNumberAtPos(text, pos); + return getLine(text, lineNumber + 1); +} diff --git a/src/interfaces/Provider.ts b/src/interfaces/Provider.ts index 1e8ce55..9a06519 100644 --- a/src/interfaces/Provider.ts +++ b/src/interfaces/Provider.ts @@ -1,6 +1,11 @@ -import { editor } from "@silverbulletmd/silverbullet/syscalls"; -import { getPageLength } from "../editorUtils.ts"; -import { ChatMessage, StreamChatOptions } from "../types.ts"; +import { editor, system } from "@silverbulletmd/silverbullet/syscalls"; +import { + getLineAfter, + getLineBefore, + getLineOfPos, + getPageLength, +} from "../editorUtils.ts"; +import { ChatMessage, PostProcessorData, StreamChatOptions } from "../types.ts"; import { enrichChatMessages } from "../utils.ts"; export interface ProviderInterface { @@ -44,12 +49,14 @@ export abstract class AbstractProvider implements ProviderInterface { options: StreamChatOptions, cursorStart: number, ): Promise { - const { onDataReceived } = options; + const { onDataReceived, onResponseComplete, postProcessors } = options; const loadingMessage = "🤔 Thinking … "; let cursorPos = cursorStart ?? 
await getPageLength(); await editor.insertAtPos(loadingMessage, cursorPos); let stillLoading = true; + const startOfResponse = cursorPos; + const onData = (data: string) => { try { if (!data) { @@ -60,7 +67,7 @@ export abstract class AbstractProvider implements ProviderInterface { if (["`", "-", "*"].includes(data.charAt(0))) { // Sometimes we get a response that is _only_ a code block, or a markdown list/etc // To let SB parse them better, we just add a new line before rendering it - console.log("First character of response is:", data.charAt(0)); + // console.log("First character of response is:", data.charAt(0)); data = "\n" + data; } editor.replaceRange( @@ -83,7 +90,45 @@ export abstract class AbstractProvider implements ProviderInterface { } }; - await this.chatWithAI({ ...options, onDataReceived: onData }); + const onDataComplete = async (data: string) => { + console.log("Response complete:", data); + const endOfResponse = startOfResponse + data.length; + console.log("Start of response:", startOfResponse); + console.log("End of response:", endOfResponse); + console.log("Full response:", data); + console.log("Post-processors:", postProcessors); + let newData = data; + + if (postProcessors) { + const pageText = await editor.getText(); + const postProcessorData: PostProcessorData = { + response: data, + lineBefore: getLineBefore(pageText, startOfResponse), + lineCurrent: getLineOfPos(pageText, startOfResponse), + lineAfter: getLineAfter(pageText, endOfResponse), + }; + for (const processor of postProcessors) { + console.log("Applying post-processor:", processor); + newData = await system.invokeSpaceFunction( + processor, + postProcessorData, + ); + } + // if (newData !== data) { + // console.log("New data:", newData); + console.log("Data changed by post-processors, updating editor"); + editor.replaceRange(startOfResponse, endOfResponse, newData); + // } + } + + if (onResponseComplete) onResponseComplete(data); + }; + + await this.chatWithAI({ + ...options, + 
onDataReceived: onData, + onResponseComplete: onDataComplete, + }); } async singleMessageChat( diff --git a/src/prompts.ts b/src/prompts.ts index 5cf6f78..0e45173 100644 --- a/src/prompts.ts +++ b/src/prompts.ts @@ -1,5 +1,10 @@ import { extractFrontmatter } from "@silverbulletmd/silverbullet/lib/frontmatter"; -import { editor, markdown, space } from "@silverbulletmd/silverbullet/syscalls"; +import { + editor, + markdown, + space, + system, +} from "@silverbulletmd/silverbullet/syscalls"; import { queryObjects } from "./utils.ts"; import { renderTemplate } from "https://deno.land/x/silverbullet@0.9.4/plugs/template/api.ts"; import type { @@ -14,7 +19,7 @@ import { enrichChatMessages, supportsPlugSlashComplete, } from "./utils.ts"; -import { ChatMessage } from "./types.ts"; +import { ChatMessage, PostProcessorData } from "./types.ts"; // This only works in 0.7.2+, see https://github.com/silverbulletmd/silverbullet/issues/742 export async function aiPromptSlashComplete( @@ -29,7 +34,7 @@ export async function aiPromptSlashComplete( return { options: allTemplates.map((template) => { const aiPromptTemplate = template.aiprompt!; - console.log("ai prompt template: ", aiPromptTemplate); + // console.log("ai prompt template: ", aiPromptTemplate); return { label: aiPromptTemplate.slashCommand, @@ -71,7 +76,7 @@ export async function insertAiPromptFromTemplate( insertAt: templateObj.aiprompt.insertAt || "cursor", chat: templateObj.aiprompt.chat || false, enrichMessages: templateObj.aiprompt.enrichMessages || false, - // parseAs: templateObj.aiprompt.parseAs || "markdown", + postProcessors: templateObj.aiprompt.postProcessors || [], }; }), `Select the template to use as the prompt. 
The prompt will be rendered and sent to the LLM model.`, @@ -89,6 +94,7 @@ export async function insertAiPromptFromTemplate( insertAt: aiprompt.insertAt || "cursor", chat: aiprompt.chat || false, enrichMessages: aiprompt.enrichMessages || false, + postProcessors: aiprompt.postProcessors || [], }; } @@ -103,6 +109,14 @@ export async function insertAiPromptFromTemplate( "cursor", "page-start", "page-end", + "start-of-line", + "end-of-line", + // Item can mean either a list item or a task + "start-of-item", + "end-of-item", + "new-line-above", + "new-line-below", + "replace-line", // "frontmatter", // "modal", // "replace", @@ -136,6 +150,60 @@ export async function insertAiPromptFromTemplate( return; } + let currentPageText, + currentLineNumber, + lineStartPos, + lineEndPos, + currentItemBounds, + currentItemText, + parentItemBounds, + parentItemText; + try { + // This is all to get the current line number and position + // It probably could be a new editor.syscall or something that uses the tree instead + currentPageText = await editor.getText(); + const curCursorPos = await editor.getCursor(); + const lines = currentPageText.split("\n"); + currentLineNumber = + currentPageText.substring(0, curCursorPos).split("\n").length; + lineStartPos = curCursorPos - + (currentPageText.substring(0, curCursorPos).split("\n").pop()?.length || + 0); + lineEndPos = lineStartPos + lines[currentLineNumber - 1].length; + + // Also get the current item if we need it + currentItemBounds = await system.invokeFunction( + "editor.determineItemBounds", + currentPageText, + curCursorPos, + undefined, + true, + ); + currentItemText = currentPageText.slice( + currentItemBounds.from, + currentItemBounds.to, + ); + + parentItemBounds = await system.invokeFunction( + "editor.determineItemBounds", + currentPageText, + curCursorPos, + 0, + true, + ); + parentItemText = currentPageText.slice( + parentItemBounds.from, + parentItemBounds.to, + ); + } catch (error) { + console.error("Error fetching 
current page text or cursor position", error); + await editor.flashNotification( + "Error fetching current page text or cursor position", + "error", + ); + return; + } + let cursorPos; switch (selectedTemplate.insertAt) { case "page-start": @@ -156,6 +224,32 @@ export async function insertAiPromptFromTemplate( case "replace": // TODO: Replace selection break; + case "replace-line": + cursorPos = lineStartPos; + await editor.replaceRange(lineStartPos, lineEndPos, ""); + break; + case "start-of-line": + cursorPos = lineStartPos; + break; + case "end-of-line": + cursorPos = lineEndPos; + break; + case "new-line-above": + cursorPos = lineStartPos; + await editor.insertAtPos("\n", cursorPos); + cursorPos += 1; + break; + case "new-line-below": + cursorPos = lineEndPos; + await editor.insertAtPos("\n", cursorPos); + cursorPos += 1; + break; + case "start-of-item": + cursorPos = currentItemBounds.from; + break; + case "end-of-item": + cursorPos = currentItemBounds.to; + break; case "cursor": default: cursorPos = await editor.getCursor(); @@ -168,12 +262,25 @@ export async function insertAiPromptFromTemplate( console.log("templatetext: ", templateText); let messages: ChatMessage[] = []; + const globalMetadata = { + page: pageMeta, + currentItemBounds: currentItemBounds, + currentItemText: currentItemText, + currentLineNumber: currentLineNumber, + lineStartPos: lineStartPos, + lineEndPos: lineEndPos, + currentPageText: currentPageText, + parentItemBounds: parentItemBounds, + parentItemText: parentItemText, + }; if (!selectedTemplate.chat) { // non-multi-chat template - const renderedTemplate = await renderTemplate(templateText, pageMeta, { - page: pageMeta, - }); + const renderedTemplate = await renderTemplate( + templateText, + pageMeta, + globalMetadata, + ); console.log("Rendered template:", renderedTemplate); if (selectedTemplate.systemPrompt) { messages.push({ @@ -195,7 +302,7 @@ export async function insertAiPromptFromTemplate( }); } if (selectedTemplate.chat && 
selectedTemplate.enrichMessages) { - messages = await enrichChatMessages(messages); + messages = await enrichChatMessages(messages, globalMetadata); } } @@ -203,5 +310,6 @@ export async function insertAiPromptFromTemplate( await currentAIProvider.streamChatIntoEditor({ messages: messages, stream: true, + postProcessors: selectedTemplate.postProcessors, }, cursorPos); } diff --git a/src/providers/ollama.ts b/src/providers/ollama.ts index 441f24b..06e3d08 100644 --- a/src/providers/ollama.ts +++ b/src/providers/ollama.ts @@ -31,12 +31,13 @@ export class OllamaProvider extends AbstractProvider { } async chatWithAI( - { messages, stream, onDataReceived }: StreamChatOptions, + { messages, stream, onDataReceived, onResponseComplete }: StreamChatOptions, ): Promise { return await this.openaiProvider.chatWithAI({ messages, stream, onDataReceived, + onResponseComplete, }); } } diff --git a/src/providers/openai.ts b/src/providers/openai.ts index 7a356d1..8b5159c 100644 --- a/src/providers/openai.ts +++ b/src/providers/openai.ts @@ -12,6 +12,7 @@ type StreamChatOptions = { messages: Array; stream?: boolean; onDataReceived?: (data: any) => void; + onResponseComplete?: (data: any) => void; cursorStart?: number; cursorFollow?: boolean; scrollIntoView?: boolean; @@ -38,17 +39,21 @@ export class OpenAIProvider extends AbstractProvider { } async chatWithAI( - { messages, stream, onDataReceived }: StreamChatOptions, + { messages, stream, onDataReceived, onResponseComplete }: StreamChatOptions, ): Promise { if (stream) { - return await this.streamChat({ messages, onDataReceived }); + return await this.streamChat({ + messages, + onDataReceived, + onResponseComplete, + }); } else { return await this.nonStreamingChat(messages); } } async streamChat(options: StreamChatOptions): Promise { - const { messages, onDataReceived } = options; + const { messages, onDataReceived, onResponseComplete } = options; try { const sseUrl = `${this.baseUrl}/chat/completions`; @@ -79,6 +84,7 @@ export class 
OpenAIProvider extends AbstractProvider { try { if (e.data == "[DONE]") { source.close(); + if (onResponseComplete) onResponseComplete(fullMsg); return fullMsg; } else { const data = JSON.parse(e.data); @@ -95,6 +101,7 @@ export class OpenAIProvider extends AbstractProvider { source.addEventListener("end", function () { source.close(); + if (onResponseComplete) onResponseComplete(fullMsg); return fullMsg; }); diff --git a/src/types.ts b/src/types.ts index 5fb8ce9..cc6eb53 100644 --- a/src/types.ts +++ b/src/types.ts @@ -8,6 +8,8 @@ export type StreamChatOptions = { messages: Array; stream: boolean; onDataReceived?: (data: any) => void; + onResponseComplete?: (data: any) => void; + postProcessors?: string[]; }; export type ImageGenerationOptions = { @@ -148,3 +150,14 @@ export type EmbeddingModelConfig = { requireAuth: boolean; baseUrl?: string; }; + +export type PostProcessorData = { + // The full response text + response: string; + // The line before where the response was inserted + lineBefore: string; + // The line after where the response was inserted + lineAfter: string; + // The line where the cursor was before the response was inserted + lineCurrent: string; +}; diff --git a/src/utils.ts b/src/utils.ts index 02d27ee..f4a6978 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -175,6 +175,7 @@ export async function supportsServerProxyCall(): Promise { */ export async function enrichChatMessages( messages: ChatMessage[], + globalMetadata?: Record, ): Promise { const enrichedMessages: ChatMessage[] = []; let currentPage, pageMeta; @@ -244,6 +245,7 @@ export async function enrichChatMessages( pageMeta, { page: pageMeta, + ...globalMetadata, }, ); enrichedContent = templateResult;