Skip to content

Commit

Permalink
updating configuration tree view (#1088)
Browse files Browse the repository at this point in the history
* simplify apis

* show models

* missing docs

* fix model listing

* handle no-url

* don't sort

* show errors

* properly resolve fallback tools

* add english prompt

* use repo config

* add dispatch

* normalize configs

* updated demo prompts
  • Loading branch information
pelikhan authored Feb 2, 2025
1 parent 10279ca commit 1d4b443
Show file tree
Hide file tree
Showing 30 changed files with 215 additions and 186 deletions.
6 changes: 6 additions & 0 deletions .github/workflows/genai-blog-post.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,12 @@ on:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
GENAISCRIPT_DEFAULT_REASONING_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_MODEL }}
GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_MODEL }}
GENAISCRIPT_DEFAULT_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_VISION_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_VISION_MODEL }}
jobs:
build:
runs-on: ubuntu-latest
Expand Down
10 changes: 5 additions & 5 deletions .github/workflows/genai-commander.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,11 @@ on:
issue_comment:
types: [created]
env:
# Configure default GenAIScript models
# using Ollama's models
GENAISCRIPT_DEFAULT_MODEL: ollama:qwen2.5-coder:7b
GENAISCRIPT_DEFAULT_SMALL_MODEL: ollama:qwen2.5-coder:1.5b
GENAISCRIPT_DEFAULT_VISION_MODEL: ollama:llama3.2-vision:11b
GENAISCRIPT_DEFAULT_REASONING_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_MODEL }}
GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_MODEL }}
GENAISCRIPT_DEFAULT_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_VISION_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_VISION_MODEL }}
jobs:
pr_commented:
# must be PR and have a comment starting with /genai
Expand Down
10 changes: 5 additions & 5 deletions .github/workflows/genai-investigator.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,11 +12,11 @@ permissions:
actions: read
pull-requests: write
env:
# Configure default GenAIScript models
# using Ollama's models
GENAISCRIPT_DEFAULT_MODEL: ollama:qwen2.5-coder:7b
GENAISCRIPT_DEFAULT_SMALL_MODEL: ollama:qwen2.5-coder:1.5b
GENAISCRIPT_DEFAULT_VISION_MODEL: ollama:llama3.2-vision:11b
GENAISCRIPT_DEFAULT_REASONING_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_MODEL }}
GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_MODEL }}
GENAISCRIPT_DEFAULT_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_VISION_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_VISION_MODEL }}
jobs:
investigate:
# Only run this job if the workflow run concluded with a failure
Expand Down
10 changes: 5 additions & 5 deletions .github/workflows/genai-issue-review.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,11 @@ concurrency:
group: issues-${{ github.event.issue.number }}
cancel-in-progress: true
env:
# Configure default GenAIScript models
# using Ollama's models
GENAISCRIPT_DEFAULT_MODEL: ollama:qwen2.5-coder:7b
GENAISCRIPT_DEFAULT_SMALL_MODEL: ollama:qwen2.5-coder:1.5b
GENAISCRIPT_DEFAULT_VISION_MODEL: ollama:llama3.2-vision:11b
GENAISCRIPT_DEFAULT_REASONING_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_MODEL }}
GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_MODEL }}
GENAISCRIPT_DEFAULT_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_VISION_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_VISION_MODEL }}
jobs:
build:
runs-on: ubuntu-latest
Expand Down
10 changes: 5 additions & 5 deletions .github/workflows/genai-pr-commit-review.yml
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,11 @@ on:
- "packages/cli/**/*"
- "packages/samples/**/*"
env:
# Configure default GenAIScript models
# using Ollama's models
GENAISCRIPT_DEFAULT_MODEL: ollama:qwen2.5-coder:7b
GENAISCRIPT_DEFAULT_SMALL_MODEL: ollama:qwen2.5-coder:1.5b
GENAISCRIPT_DEFAULT_VISION_MODEL: ollama:llama3.2-vision:11b
GENAISCRIPT_DEFAULT_REASONING_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_MODEL }}
GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_MODEL }}
GENAISCRIPT_DEFAULT_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_VISION_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_VISION_MODEL }}
jobs:
build:
runs-on: ubuntu-latest
Expand Down
10 changes: 5 additions & 5 deletions .github/workflows/genai-pr-docs-commit-review.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,11 @@ on:
- docs/**/*.md
- docs/**/*.mdx
env:
# Configure default GenAIScript models
# using Ollama's models
GENAISCRIPT_DEFAULT_MODEL: ollama:qwen2.5-coder:7b
GENAISCRIPT_DEFAULT_SMALL_MODEL: ollama:qwen2.5-coder:1.5b
GENAISCRIPT_DEFAULT_VISION_MODEL: ollama:llama3.2-vision:11b
GENAISCRIPT_DEFAULT_REASONING_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_MODEL }}
GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_MODEL }}
GENAISCRIPT_DEFAULT_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_VISION_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_VISION_MODEL }}
jobs:
build:
runs-on: ubuntu-latest
Expand Down
15 changes: 8 additions & 7 deletions .github/workflows/genai-pr-review.yml
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
name: genai pull request review
on:
workflow_dispatch:
pull_request:
types: [opened, ready_for_review, reopened]
paths:
Expand All @@ -12,11 +13,11 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
# Configure default GenAIScript models
# using Ollama's models
GENAISCRIPT_DEFAULT_MODEL: ollama:qwen2.5-coder:7b
GENAISCRIPT_DEFAULT_SMALL_MODEL: ollama:qwen2.5-coder:1.5b
GENAISCRIPT_DEFAULT_VISION_MODEL: ollama:llama3.2-vision:11b
GENAISCRIPT_DEFAULT_REASONING_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_MODEL }}
GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_MODEL }}
GENAISCRIPT_DEFAULT_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_VISION_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_VISION_MODEL }}
jobs:
build:
runs-on: ubuntu-latest
Expand All @@ -40,11 +41,11 @@ jobs:
run: git fetch origin && git pull origin main:main
- name: genaiscript pr-describe
continue-on-error: true
run: node packages/cli/built/genaiscript.cjs run pr-describe --out ./temp/genai/pr-describe -prd --out-trace $GITHUB_STEP_SUMMARY
run: node packages/cli/built/genaiscript.cjs run pr-describe --out ./temp/genai/pr-describe -prd -m reasoning --out-trace $GITHUB_STEP_SUMMARY
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: genaiscript pr-review
run: node packages/cli/built/genaiscript.cjs run pr-review --out ./temp/genai/pr-review -prc --out-trace $GITHUB_STEP_SUMMARY
run: node packages/cli/built/genaiscript.cjs run pr-review --out ./temp/genai/pr-review -prc -m reasoning --out-trace $GITHUB_STEP_SUMMARY
continue-on-error: true
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
Expand Down
6 changes: 6 additions & 0 deletions .github/workflows/ollama.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,12 @@ on:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-ollama
cancel-in-progress: true
env:
GENAISCRIPT_DEFAULT_REASONING_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_MODEL }}
GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_REASONING_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_MODEL }}
GENAISCRIPT_DEFAULT_SMALL_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_SMALL_MODEL }}
GENAISCRIPT_DEFAULT_VISION_MODEL: ${{ vars.GENAISCRIPT_DEFAULT_VISION_MODEL }}
jobs:
tests:
runs-on: ubuntu-latest
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,7 @@ const { files } = await retrieval.vectorSearch("cats", "**/*.md")

### 🐙 GitHub Models and GitHub Copilot

Run models through [GitHub Models](https://microsoft.github.io/genaiscript/getting-started/configuration#github) or [GitHub Copilot](https://microsoft.github.io/genaiscript/getting-started/configuration/#github-copilot).
Run models through [GitHub Models](https://microsoft.github.io/genaiscript/getting-started/configuration#github) or [GitHub Copilot](https://microsoft.github.io/genaiscript/getting-started/configuration/#github_copilot_chat).

```js
script({ ..., model: "github:gpt-4o" })
Expand Down
16 changes: 13 additions & 3 deletions docs/src/content/docs/getting-started/configuration.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -833,7 +833,7 @@ script({ model: "google:gemini-1.5-pro-latest" })

<LLMProviderFeatures provider="google" />

## GitHub Copilot Chat Models <a id="github-copilot" href=""></a>
## GitHub Copilot Chat Models <a id="github_copilot_chat" href=""></a>

If you have access to **GitHub Copilot Chat in Visual Studio Code**,
GenAIScript will be able to leverage those [language models](https://code.visualstudio.com/api/extension-guides/language-model) as well.
Expand Down Expand Up @@ -1256,7 +1256,7 @@ script({

<LLMProviderFeatures provider="deepseek" />

## LMStudio
## LM Studio <a href="" id="lmstudio" />

The `lmstudio` provider connects to the [LMStudio](https://lmstudio.ai/) headless server
and allows you to run local LLMs.
Expand Down Expand Up @@ -1319,7 +1319,7 @@ script({

<LLMProviderFeatures provider="lmstudio" />

### LMStudio and Hugging Face Models
### LM Studio and Hugging Face Models

Follow [this guide](https://huggingface.co/blog/yagilb/lms-hf) to load Hugging Face models into LMStudio.

Expand Down Expand Up @@ -1461,6 +1461,16 @@ OPENROUTER_SITE_URL=... # populates HTTP-Referer header
OPENROUTER_SITE_NAME=... # populate X-Title header
```

## LiteLLM

The [LiteLLM](https://docs.litellm.ai/) proxy gateway provides an OpenAI-compatible API for running models locally.
Configure the `LITELLM_...` keys to set the API key and, optionally, the base URL.

```txt title=".env"
LITELLM_API_KEY="..."
#LITELLM_API_BASE="..."
```

## Hugging Face Transformer.js <a href="" id="transformers" />

This `transformers` provider runs models on device using [Hugging Face Transformers.js](https://huggingface.co/docs/transformers.js/index).
Expand Down
2 changes: 1 addition & 1 deletion docs/src/content/docs/index.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -373,7 +373,7 @@ const res = await safety.detectPromptInjection(env.vars.input)
<Card title="GitHub Models and GitHub Copilot" icon="github">

Run models through GitHub using [GitHub Models](/genaiscript/getting-started/configuration#github)
or [GitHub Copilot](/genaiscript/getting-started/configuration/#github-copilot).
or [GitHub Copilot](/genaiscript/getting-started/configuration/#github_copilot_chat).

```js wrap
script({ ..., model: "github:gpt-4o" })
Expand Down
18 changes: 18 additions & 0 deletions docs/src/content/docs/reference/scripts/system.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -722,6 +722,24 @@ DIFF ./file4.ts:
`````


### `system.english`

Use english output





`````js wrap title="system.english"
system({
title: "Use english output",
})
$`## English output
Use English in the output of the system. Use English in the reasoning output as well.`

`````


### `system.explanations`

Explain your answers
Expand Down
2 changes: 1 addition & 1 deletion packages/cli/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -205,7 +205,7 @@ const { files } = await retrieval.vectorSearch("cats", "**/*.md")

### 🐙 GitHub Models and GitHub Copilot

Run models through [GitHub Models](https://microsoft.github.io/genaiscript/getting-started/configuration#github) or [GitHub Copilot](https://microsoft.github.io/genaiscript/getting-started/configuration/#github-copilot-in-visual-studio-code).
Run models through [GitHub Models](https://microsoft.github.io/genaiscript/getting-started/configuration#github) or [GitHub Copilot](https://microsoft.github.io/genaiscript/getting-started/configuration/#github_copilot_chat).

```js
script({ ..., model: "github:gpt-4o" })
Expand Down
2 changes: 1 addition & 1 deletion packages/cli/src/run.ts
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ export async function runScriptInternal(
const removeOut = options.removeOut
const cancellationToken = options.cancellationToken
const jsSource = options.jsSource
const fallbackTools = !!options.fallbackTools
const fallbackTools = options.fallbackTools
const logprobs = options.logprobs
const topLogprobs = normalizeInt(options.topLogprobs)
const fenceFormat = options.fenceFormat
Expand Down
2 changes: 2 additions & 0 deletions packages/cli/src/server.ts
Original file line number Diff line number Diff line change
Expand Up @@ -189,7 +189,9 @@ export async function startServer(options: {
providers: await resolveLanguageModelConfigurations(undefined, {
token: false,
error: true,
models: true,
}),
modelAliases: runtimeHost.modelAliases,
remote: remote
? {
url: remote,
Expand Down
19 changes: 12 additions & 7 deletions packages/core/src/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ import { deleteEmptyValues } from "./cleaners"
import { errorMessage } from "./error"
import schema from "../../../docs/public/schemas/config.json"
import defaultConfig from "./config.json"
import { CancellationOptions } from "./cancellation"

export async function resolveGlobalConfiguration(
dotEnvPath?: string
Expand Down Expand Up @@ -100,7 +101,11 @@ export async function resolveGlobalConfiguration(
*/
export async function resolveLanguageModelConfigurations(
provider: string,
options?: { token?: boolean; error?: boolean; models?: boolean }
options?: {
token?: boolean
error?: boolean
models?: boolean
} & CancellationOptions
): Promise<ResolvedLanguageModelConfiguration[]> {
const { token, error, models } = options || {}
const res: ResolvedLanguageModelConfiguration[] = []
Expand All @@ -118,19 +123,19 @@ export async function resolveLanguageModelConfigurations(
if (conn) {
// Mask the token if the option is set
let listError = ""
if (!token && conn.token) conn.token = "***"
if (models) {
const lm = await resolveLanguageModel(modelProvider.id)
if (lm.listModels) {
const models = await lm.listModels(conn, {})
const models = await lm.listModels(conn, options)
if (models.ok) conn.models = models.models
else
listError =
errorMessage(listError) ||
"failed to llist models"
errorMessage(models.error) ||
"failed to list models"
}
}
if (!listError || error)
if (!token && conn.token) conn.token = "***"
if (!listError || error || provider)
res.push(
deleteEmptyValues({
provider: conn.provider,
Expand All @@ -143,7 +148,7 @@ export async function resolveLanguageModelConfigurations(
)
}
} catch (e) {
if (error)
if (error || provider)
// Capture and store any errors encountered
res.push({
provider: modelProvider.id,
Expand Down
4 changes: 2 additions & 2 deletions packages/core/src/expander.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ import { addToolDefinitionsMessage, appendSystemMessage } from "./chat"
import { importPrompt } from "./importprompt"
import { parseModelIdentifier } from "./models"
import { runtimeHost } from "./host"
import { resolveSystems } from "./systems"
import { addFallbackToolSystems, resolveSystems } from "./systems"
import { GenerationOptions } from "./generation"
import {
AICIRequest,
Expand Down Expand Up @@ -356,7 +356,7 @@ export async function expandTemplate(
trace.endDetails()
}

if (systems.includes("system.tool_calls")) {
if (addFallbackToolSystems(systems, tools, template, options)) {
addToolDefinitionsMessage(messages, tools)
options.fallbackTools = true
}
Expand Down
5 changes: 5 additions & 0 deletions packages/core/src/genaisrc/system.english.genai.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
// GenAIScript system prompt fragment: registers a reusable system script
// titled "Use english output" (title is a runtime string — left as-is).
system({
title: "Use english output",
})
// $`...` appends this instruction to the prompt; it explicitly covers both
// the main output and the reasoning output.
$`## English output
Use English in the output of the system. Use English in the reasoning output as well.`
1 change: 1 addition & 0 deletions packages/core/src/llms.json
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
"bearerToken": true,
"transcribe": true,
"speech": true,
"listModels": true,
"aliases": {
"large": "gpt-4o",
"small": "gpt-4o-mini",
Expand Down
2 changes: 1 addition & 1 deletion packages/core/src/openai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -456,7 +456,7 @@ export const OpenAIListModels: ListModelsFunction = async (cfg, options) => {
return {
ok: false,
status: res.status,
error: serializeError(res.statusText),
error: serializeError(await res.json()),
}
const { data } = (await res.json()) as {
object: "list"
Expand Down
Loading

0 comments on commit 1d4b443

Please sign in to comment.