From cce75cccafc3bbefc0d5ac5867c635024dc19bb8 Mon Sep 17 00:00:00 2001
From: Justyn Shull
Date: Tue, 28 Jan 2025 02:59:09 -0600
Subject: [PATCH] Formatting

---
 docs/Changelog.md                   |  1 +
 silverbullet-ai.plug.yaml           |  2 +-
 src/interfaces/EmbeddingProvider.ts |  6 +--
 src/interfaces/Provider.ts          |  7 ++-
 src/providers/gemini.ts             | 12 ++++--
 src/providers/ollama.ts             |  4 +-
 src/providers/openai.ts             |  8 ++--
 src/proxyHandler.ts                 | 67 ++++++++++++++++++++---------
 8 files changed, 72 insertions(+), 35 deletions(-)

diff --git a/docs/Changelog.md b/docs/Changelog.md
index 9694d2b..7b296bf 100644
--- a/docs/Changelog.md
+++ b/docs/Changelog.md
@@ -6,6 +6,7 @@ This page is a brief overview of each version.
 - Better logging when SSE events have errors
 - Add support for retrieving list of models from openai and ollama providers
 - Add a Connectivity Test command and page to test whether an api is working
+- Add experimental support for proxying all LLM calls through the silverbullet server
 
 ---
 ## 0.4.1 (2024-11-15)
diff --git a/silverbullet-ai.plug.yaml b/silverbullet-ai.plug.yaml
index 12c89bb..a5df867 100644
--- a/silverbullet-ai.plug.yaml
+++ b/silverbullet-ai.plug.yaml
@@ -6,7 +6,7 @@ functions:
     path: src/proxyHandler.ts:proxyHandler
     events:
       - http:request:/ai-proxy/*
-  
+
   aiPromptSlashCommplete:
     path: src/prompts.ts:aiPromptSlashComplete
     events:
diff --git a/src/interfaces/EmbeddingProvider.ts b/src/interfaces/EmbeddingProvider.ts
index fa53dbd..82b5032 100644
--- a/src/interfaces/EmbeddingProvider.ts
+++ b/src/interfaces/EmbeddingProvider.ts
@@ -30,7 +30,7 @@ export abstract class AbstractEmbeddingProvider
     baseUrl: string,
     modelName: string,
     requireAuth: boolean = true,
-    proxyOnServer?: boolean
+    proxyOnServer?: boolean,
   ) {
     this.apiKey = apiKey;
     this.baseUrl = baseUrl;
@@ -42,11 +42,11 @@ export abstract class AbstractEmbeddingProvider
 
   protected getUrl(path: string): string {
     // Remove any leading slashes from the path
-    path = path.replace(/^\/+/, '');
+    path = path.replace(/^\/+/, "");
 
     if (this.proxyOnServer) {
       // Remove any v1 prefix from the path if it exists
-      path = path.replace(/^v1\//, '');
+      path = path.replace(/^v1\//, "");
       return `/_/ai-proxy/${this.fullName}/${path}`;
     } else {
       return `${this.baseUrl}/${path}`;
diff --git a/src/interfaces/Provider.ts b/src/interfaces/Provider.ts
index 530f4fe..f7f185c 100644
--- a/src/interfaces/Provider.ts
+++ b/src/interfaces/Provider.ts
@@ -49,10 +49,13 @@ export abstract class AbstractProvider implements ProviderInterface {
 
   protected getUrl(path: string): string {
     // Remove leading slashes from the path
-    path = path.replace(/^\/+/, '');
+    path = path.replace(/^\/+/, "");
 
     if (this.proxyOnServer) {
-      console.log("Proxy on server, using proxy URL:", `/_/ai-proxy/${this.fullName}/${path}`);
+      console.log(
+        "Proxy on server, using proxy URL:",
+        `/_/ai-proxy/${this.fullName}/${path}`,
+      );
       return `/_/ai-proxy/${this.fullName}/${path}`;
     } else {
       return `${this.baseUrl}/${path}`;
diff --git a/src/providers/gemini.ts b/src/providers/gemini.ts
index c7804d2..957739b 100644
--- a/src/providers/gemini.ts
+++ b/src/providers/gemini.ts
@@ -93,7 +93,9 @@ export class GeminiProvider extends AbstractProvider {
     const { messages, onDataReceived } = options;
 
     try {
-      const sseUrl = this.getUrl(`v1beta/models/${this.modelName}:streamGenerateContent?key=${this.apiKey}&alt=sse`);
+      const sseUrl = this.getUrl(
+        `v1beta/models/${this.modelName}:streamGenerateContent?key=${this.apiKey}&alt=sse`,
+      );
 
       const headers: HttpHeaders = {
         "Content-Type": "application/json",
@@ -165,7 +167,9 @@ export class GeminiProvider extends AbstractProvider {
     );
 
     const response = await nativeFetch(
-      this.getUrl(`v1beta/models/${this.modelName}:generateContent?key=${this.apiKey}`),
+      this.getUrl(
+        `v1beta/models/${this.modelName}:generateContent?key=${this.apiKey}`,
+      ),
       {
         method: "POST",
         headers: {
@@ -213,7 +217,9 @@ export class GeminiEmbeddingProvider extends AbstractEmbeddingProvider {
     }
 
     const response = await nativeFetch(
-      this.getUrl(`v1beta/models/${this.modelName}:embedContent?key=${this.apiKey}`),
+      this.getUrl(
+        `v1beta/models/${this.modelName}:embedContent?key=${this.apiKey}`,
+      ),
       {
         method: "POST",
         headers: headers,
diff --git a/src/providers/ollama.ts b/src/providers/ollama.ts
index be8a1a7..e7e9964 100644
--- a/src/providers/ollama.ts
+++ b/src/providers/ollama.ts
@@ -58,7 +58,7 @@ export class OllamaProvider extends AbstractProvider {
 
     // List models api isn't behind /v1/ like the other endpoints, but we don't want to force the user to change the config yet
     const response = await nativeFetch(
-      this.getUrl('api/tags').replace(/\/v1\/?/, ''),
+      this.getUrl("api/tags").replace(/\/v1\/?/, ""),
       {
         method: "GET",
         headers: headers,
@@ -113,7 +113,7 @@ export class OllamaEmbeddingProvider extends AbstractEmbeddingProvider {
     }
 
     const response = await nativeFetch(
-      this.getUrl('api/embeddings'),
+      this.getUrl("api/embeddings"),
       {
         method: "POST",
         headers: headers,
diff --git a/src/providers/openai.ts b/src/providers/openai.ts
index 1812d88..cb45e9d 100644
--- a/src/providers/openai.ts
+++ b/src/providers/openai.ts
@@ -58,7 +58,7 @@ export class OpenAIProvider extends AbstractProvider {
     const { messages, onDataReceived, onResponseComplete } = options;
 
     try {
-      const sseUrl = this.getUrl('chat/completions');
+      const sseUrl = this.getUrl("chat/completions");
 
       const headers: HttpHeaders = {
         "Content-Type": "application/json",
@@ -135,7 +135,7 @@ export class OpenAIProvider extends AbstractProvider {
     }
 
     const response = await nativeFetch(
-      this.getUrl('models'),
+      this.getUrl("models"),
       {
         method: "GET",
         headers: headers,
@@ -173,7 +173,7 @@ export class OpenAIProvider extends AbstractProvider {
     };
 
     const response = await nativeFetch(
-      this.getUrl('chat/completions'),
+      this.getUrl("chat/completions"),
       {
         method: "POST",
         headers: headers,
@@ -232,7 +232,7 @@ export class OpenAIEmbeddingProvider extends AbstractEmbeddingProvider {
     }
 
     const response = await nativeFetch(
-      this.getUrl('embeddings'),
+      this.getUrl("embeddings"),
       {
         method: "POST",
         headers: headers,
diff --git a/src/proxyHandler.ts b/src/proxyHandler.ts
index 689e153..d5af2d9 100644
--- a/src/proxyHandler.ts
+++ b/src/proxyHandler.ts
@@ -8,7 +8,10 @@ import type {
 } from "@silverbulletmd/silverbullet/types";
 import { Provider } from "./types.ts";
 
-export async function proxyHandler(request: EndpointRequest): Promise<EndpointResponse> {
+export async function proxyHandler(
+  request: EndpointRequest,
+): Promise<EndpointResponse> {
+  // TODO: Add a token or something to restrict access to this endpoint
   console.log("ai:proxyHandler called to handle path:", request.path);
   // Find the model name from the path (e.g., /ai-proxy/ollama-localhost-proxy/chat/completions)
   const pathParts = request.path.split("/");
@@ -29,47 +32,63 @@ export async function proxyHandler(request: EndpointRequest): Promise<EndpointResponse> {
   const modelConfig = aiSettings.textModels.find(
-    (model: ModelConfig) => model.name === modelName
+    (model: ModelConfig) => model.name === modelName,
   );
 
   if (!modelConfig) {
-    return new Response(`Model '${modelName}' not found in configuration`, { status: 404 });
+    return new Response(`Model '${modelName}' not found in configuration`, {
+      status: 404,
+    });
   }
 
   // Check if proxy is enabled for this model
   if (modelConfig.proxyOnServer === false) {
-    return new Response(`Proxy is disabled for model '${modelName}'`, { status: 403 });
+    return new Response(`Proxy is disabled for model '${modelName}'`, {
+      status: 403,
+    });
   }
 
   // TODO: Confirm this doesn't overwrite the user's selected model
   await configureSelectedModel(modelConfig);
 
   let baseUrl = modelConfig.baseUrl;
-  console.log("proxyHandler baseUrl:", baseUrl, "provider:", modelConfig.provider, "remainingPath:", remainingPath);
+  console.log(
+    "proxyHandler baseUrl:",
+    baseUrl,
+    "provider:",
+    modelConfig.provider,
+    "remainingPath:",
+    remainingPath,
+  );
 
   // TODO: switch completions for ollama to use ollama api instead of openai api?
   if (modelConfig.provider === Provider.Ollama) {
     // For the list models endpoint, use /api/ without v1 prefix
     if (remainingPath.includes("models") || remainingPath == "api/tags") {
-      baseUrl = baseUrl.replace(/v1\/?/, '');
+      baseUrl = baseUrl.replace(/v1\/?/, "");
     } else {
       // Everything else should use openai-compatible endpoints under /v1/
-      baseUrl = baseUrl.replace(/\/v1\/?$/, '') + '/v1/';
+      baseUrl = baseUrl.replace(/\/v1\/?$/, "") + "/v1/";
     }
     console.log("New baseUrl for ollama:", baseUrl);
   }
 
   // Use the baseUrl from the model config and append the remaining path
-  const apiUrl = baseUrl.endsWith('/') ? `${baseUrl}${remainingPath}` : `${baseUrl}/${remainingPath}`;
+  const apiUrl = baseUrl.endsWith("/")
+    ? `${baseUrl}${remainingPath}`
+    : `${baseUrl}/${remainingPath}`;
 
   // Forward the request to the appropriate LLM API with original headers
   const requestHeaders: Record<string, string> = {
     "Content-Type": "application/json",
     "Accept": "*/*",
   };
-  
+
   // Copy other relevant headers, but skip problematic ones
   for (const [key, value] of Object.entries(request.headers)) {
-    if (!["host", "connection", "origin", "content-type", "content-length"].includes(key.toLowerCase())) {
+    if (
+      !["host", "connection", "origin", "content-type", "content-length"]
+        .includes(key.toLowerCase())
+    ) {
       requestHeaders[key] = value as string;
     }
   }
@@ -78,7 +97,9 @@ export async function proxyHandler(request: EndpointRequest): Promise<EndpointResponse>
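
Reviewer note: since this patch reformats both halves of the proxy path handling, here is a minimal sketch (plain TypeScript, runnable with Deno) of the URL round trip the code above implements. The /_/ai-proxy/ prefix, the leading-slash stripping, the path parsing, and the baseUrl joining mirror the diff; the ModelConfig shape and the standalone helper names (getProxyUrl, resolveUpstream) are simplified illustrations for this note, not the plug's actual exports.

// Simplified model config; the real plug config has more fields.
type ModelConfig = {
  name: string; // e.g. "ollama-localhost-proxy"
  baseUrl: string; // e.g. "http://localhost:11434/v1"
  proxyOnServer?: boolean;
};

// Client side (cf. AbstractProvider.getUrl): route API calls through the
// SilverBullet server when proxyOnServer is enabled.
function getProxyUrl(model: ModelConfig, path: string): string {
  path = path.replace(/^\/+/, ""); // strip leading slashes, as in the diff
  if (model.proxyOnServer) {
    return `/_/ai-proxy/${model.name}/${path}`;
  }
  return `${model.baseUrl}/${path}`;
}

// Server side (cf. proxyHandler): recover the model name and remaining path
// from a request path like /ai-proxy/<modelName>/<remainingPath>, then
// rebuild the upstream URL from the configured baseUrl.
function resolveUpstream(models: ModelConfig[], requestPath: string): string {
  const parts = requestPath.split("/"); // ["", "ai-proxy", modelName, ...rest]
  const modelName = parts[2];
  const remainingPath = parts.slice(3).join("/");
  const model = models.find((m) => m.name === modelName);
  if (!model) throw new Error(`Model '${modelName}' not found`);
  return model.baseUrl.endsWith("/")
    ? `${model.baseUrl}${remainingPath}`
    : `${model.baseUrl}/${remainingPath}`;
}

// Example: a chat completion request through the proxy.
const model: ModelConfig = {
  name: "ollama-localhost-proxy",
  baseUrl: "http://localhost:11434/v1",
  proxyOnServer: true,
};
console.log(getProxyUrl(model, "chat/completions"));
// -> /_/ai-proxy/ollama-localhost-proxy/chat/completions
console.log(
  resolveUpstream([model], "/ai-proxy/ollama-localhost-proxy/chat/completions"),
);
// -> http://localhost:11434/v1/chat/completions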