diff --git a/.docs/astro.config.mjs b/.docs/astro.config.mjs
index 26d19b93..df9e924a 100644
--- a/.docs/astro.config.mjs
+++ b/.docs/astro.config.mjs
@@ -14,11 +14,7 @@ export default defineConfig({
       sidebar: [
         {
           label: "Guides",
-          items: [
-            // Each item here is one entry in the navigation menu.
-            { label: "Quickstart", slug: "guides/quickstart" },
-            { label: "Customizing", slug: "guides/customizing" },
-          ],
+          autogenerate: { directory: "guides" },
         },
         {
           label: "Reference",
diff --git a/.docs/components/FetchedData.astro b/.docs/components/FetchedData.astro
deleted file mode 100644
index a09a8998..00000000
--- a/.docs/components/FetchedData.astro
+++ /dev/null
@@ -1,31 +0,0 @@
----
-const { data } = Astro.props; // Receive the data as a prop
-const response = await fetch(data);
-const fetchedData = await response.text();
-
-// Function to extract the basename from a URL
-const getBasename = (url: string | URL) => {
-  const urlObj = new URL(url);
-  return urlObj.pathname.split('/').filter(Boolean).pop() || 'Index';
-};
-
-const basename = getBasename(data);
----
-
-
-{fetchedData}
diff --git a/.docs/src/content/docs/guides/customizing.mdx b/.docs/src/content/docs/guides/customizing.mdx
index d57342cf..41f3a8c1 100644
--- a/.docs/src/content/docs/guides/customizing.mdx
+++ b/.docs/src/content/docs/guides/customizing.mdx
@@ -19,14 +19,16 @@ You do not need to fork the repository to add your own custom prompts. Copy the
### Configuring Model Name
-The model name is used to invoke the large language model. By default it is named 'model' and you can use 'model' when you invoke any commands. For example you can ask the model to do something for you by saying 'model ask \'. You can override this in your own configuration without changing this repository. To do so just create another talon list with the same name and a higher specificity. Here is an example that you can copy and past into your own configuration files
+The word `model` is the default prefix for all LLM commands, which prevents collisions with other Talon commands. However, you can change or override it. To do so, just create another Talon list with the same name and a higher specificity. Here is an example that you can copy and paste into your own configuration files.
-```yml
+```yml title="myCustomModelName.talon-list"
list: user.model
-
-# The default model name
-model: model
+# Whatever you say that matches the phrase on the left is mapped to the word `model`,
+# which allows all model commands to work as normal, but with a new prefix keyword
+
+my custom model name: model
```
### Providing Custom User Context
diff --git a/.docs/src/content/docs/guides/quickstart.md b/.docs/src/content/docs/guides/quickstart.mdx
similarity index 67%
rename from .docs/src/content/docs/guides/quickstart.md
rename to .docs/src/content/docs/guides/quickstart.mdx
index 58eaa3a3..f91528ee 100644
--- a/.docs/src/content/docs/guides/quickstart.md
+++ b/.docs/src/content/docs/guides/quickstart.mdx
@@ -1,6 +1,8 @@
---
title: Quickstart
description: Set up talon-ai-tools in under 5 min
+sidebar:
+ order: 1
---
:::note
@@ -9,24 +11,30 @@ This assumes you have Talon installed. If you are new to Talon check out the Qui
:::
+import { Steps } from "@astrojs/starlight/components";
+
+<Steps>
+
1. Download or `git clone` this repo into your Talon user directory.
1. [Obtain an OpenAI API key](https://platform.openai.com/signup).
1. Create a Python file anywhere in your Talon user directory.
1. Set the key environment variable within the Python file
-:::caution
+ :::caution
-Make sure you do not push the key to a public repo!
+ Make sure you do not push the key to a public repo!
-:::
+ :::
+
+ ```python
+ # Example of setting the environment variable
+ import os
-```python
-# Example of setting the environment variable
-import os
+ os.environ["OPENAI_API_KEY"] = "YOUR-KEY-HERE"
+ ```
-os.environ["OPENAI_API_KEY"] = "YOUR-KEY-HERE"
-```
+</Steps>
### Quickstart Video
diff --git a/.docs/src/content/docs/reference/gpt.mdx b/.docs/src/content/docs/reference/gpt.mdx
index 0972a5f4..52a4f43d 100644
--- a/.docs/src/content/docs/reference/gpt.mdx
+++ b/.docs/src/content/docs/reference/gpt.mdx
@@ -1,11 +1,13 @@
---
title: GPT Commands
description: Run
+sidebar:
+ order: 1
---
import FetchedData from "../../../components/FetchedData.astro";
-GPT commands make requests to an LLM endpoint. In talon each GPT command is prefixed with the word `model`. A model command has 4 parts
+GPT commands make requests to an LLM endpoint. In our grammar, each GPT command is prefixed with the word `model`. A model command has four parts:
- `model`
- A prompt name
@@ -34,7 +36,7 @@ There are also a few other special commands like `model please` and `model ask`
## All model options
-_The left is the spoken word, and the right, is how talon interprets it_
+_The left is the spoken word, and the right is how Talon interprets it_
### Mappings of prompts to their spoken name