Skip to content

Commit

Permalink
Merge branch 'master' into draft-llamafile-support
Browse files Browse the repository at this point in the history
  • Loading branch information
Pwuts committed Jul 17, 2024
2 parents b7da031 + 0b9f3be commit f6a4d9f
Show file tree
Hide file tree
Showing 80 changed files with 9,147 additions and 1,107 deletions.
1 change: 0 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,6 @@ dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
Expand Down
47 changes: 47 additions & 0 deletions .vscode/all-projects.code-workspace
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
{
"folders": [
{
"name": "autogpt",
"path": "../autogpt"
},
{
"name": "benchmark",
"path": "../benchmark"
},
{
"name": "docs",
"path": "../docs"
},
{
"name": "forge",
"path": "../forge"
},
{
"name": "frontend",
"path": "../frontend"
},
{
"name": "autogpt_server",
"path": "../rnd/autogpt_server"
},
{
"name": "autogpt_builder",
"path": "../rnd/autogpt_builder"
},
{
"name": "[root]",
"path": ".."
}
],
"settings": {},
"extensions": {
"recommendations": [
"charliermarsh.ruff",
"dart-code.flutter",
"ms-python.black-formatter",
"ms-python.vscode-pylance",
"prisma.prisma",
"qwtel.sqlite-viewer"
]
}
}
10 changes: 5 additions & 5 deletions autogpt/autogpt/agent_factory/profile_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@


class AgentProfileGeneratorConfiguration(SystemConfiguration):
model_classification: LanguageModelClassification = UserConfigurable(
llm_classification: LanguageModelClassification = UserConfigurable(
default=LanguageModelClassification.SMART_MODEL
)
_example_call: object = {
Expand Down Expand Up @@ -148,21 +148,21 @@ class AgentProfileGenerator(PromptStrategy):

def __init__(
self,
model_classification: LanguageModelClassification,
llm_classification: LanguageModelClassification,
system_prompt: str,
user_prompt_template: str,
create_agent_function: dict,
):
self._model_classification = model_classification
self._llm_classification = llm_classification
self._system_prompt_message = system_prompt
self._user_prompt_template = user_prompt_template
self._create_agent_function = CompletionModelFunction.model_validate(
create_agent_function
)

@property
def model_classification(self) -> LanguageModelClassification:
return self._model_classification
def llm_classification(self) -> LanguageModelClassification:
return self._llm_classification

def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
system_message = ChatMessage.system(self._system_prompt_message)
Expand Down
2 changes: 1 addition & 1 deletion autogpt/autogpt/agents/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ def __init__(
lambda x: self.llm_provider.count_tokens(x, self.llm.name),
llm_provider,
ActionHistoryConfiguration(
model_name=app_config.fast_llm, max_tokens=self.send_token_limit
llm_name=app_config.fast_llm, max_tokens=self.send_token_limit
),
)
.run_after(WatchdogComponent)
Expand Down
2 changes: 1 addition & 1 deletion autogpt/autogpt/agents/prompt_strategies/one_shot.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ def __init__(
self.logger = logger

@property
def model_classification(self) -> LanguageModelClassification:
def llm_classification(self) -> LanguageModelClassification:
return LanguageModelClassification.FAST_MODEL # FIXME: dynamic switching

def build_prompt(
Expand Down
6 changes: 3 additions & 3 deletions docs/content/forge/components/built-in-components.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ Necessary for saving and loading agent's state (preserving session).

| Config variable | Details | Type | Default |
| ---------------- | -------------------------------------- | ----- | ---------------------------------- |
| `storage_path` | Path to agent files, e.g. state | `str` | `agents/{agent_id}/`[^1] |
| `storage_path` | Path to agent files, e.g. state | `str` | `agents/{agent_id}/`[^1] |
| `workspace_path` | Path to files that agent has access to | `str` | `agents/{agent_id}/workspace/`[^1] |

[^1] This option is set dynamically during component construction as opposed to by default inside the configuration model, `{agent_id}` is replaced with the agent's unique identifier.
Expand Down Expand Up @@ -84,7 +84,7 @@ Keeps track of agent's actions and their outcomes. Provides their summary to the

| Config variable | Details | Type | Default |
| ---------------------- | ------------------------------------------------------- | ----------- | ------------------ |
| `model_name` | Name of the llm model used to compress the history | `ModelName` | `"gpt-3.5-turbo"` |
| `llm_name` | Name of the llm model used to compress the history | `ModelName` | `"gpt-3.5-turbo"` |
| `max_tokens` | Maximum number of tokens to use for the history summary | `int` | `1024` |
| `spacy_language_model` | Language model used for summary chunking using spacy | `str` | `"en_core_web_sm"` |
| `full_message_count` | Number of cycles to include unsummarized in the prompt | `int` | `4` |
Expand Down Expand Up @@ -178,7 +178,7 @@ Allows agent to read websites using Selenium.

| Config variable | Details | Type | Default |
| ----------------------------- | ------------------------------------------- | --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
| `model_name` | Name of the llm model used to read websites | `ModelName` | `"gpt-3.5-turbo"` |
| `llm_name` | Name of the llm model used to read websites | `ModelName` | `"gpt-3.5-turbo"` |
| `web_browser` | Web browser used by Selenium | `"chrome" \| "firefox" \| "safari" \| "edge"` | `"chrome"` |
| `headless` | Run browser in headless mode | `bool` | `True` |
| `user_agent` | User agent used by the browser | `str` | `"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"` |
Expand Down
172 changes: 172 additions & 0 deletions docs/content/server/new_nodes.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,172 @@
# Contributing to AutoGPT Agent Server: Creating Nodes!

This guide will walk you through the process of creating a new node (also called a block) for the AutoGPT Agent Server. We'll use the GetWikipediaSummary node as an example.

## Understanding Nodes

Nodes are reusable components that can be connected to form a graph representing an agent's behavior. Each node has inputs, outputs, and a specific function it performs.

## Creating a New Node

To create a new node, follow these steps:

1. **Create a new Python file** in the `autogpt_server/blocks` directory. Name it descriptively and use snake_case. For example: `get_wikipedia_summary.py`.

2. **Import necessary modules and create a class that inherits from `Block`**. Make sure to include all necessary imports for your node. Every node should contain:

```python
from autogpt_server.data.block import Block, BlockSchema, BlockOutput
```

Example for the Wikipedia summary node:

```python
import requests
from autogpt_server.data.block import Block, BlockSchema, BlockOutput

class GetWikipediaSummary(Block):
# Node implementation will go here
```

3. **Define the input and output schemas** using `BlockSchema`. These schemas specify the data structure that the node expects to receive (input) and produce (output).

- The input schema defines the structure of the data the node will process. Each field in the schema represents a required piece of input data.
- The output schema defines the structure of the data the node will return after processing. Each field in the schema represents a piece of output data.

Example:

```python
class Input(BlockSchema):
topic: str # The topic to get the Wikipedia summary for

class Output(BlockSchema):
summary: str # The summary of the topic from Wikipedia
```

4. **Implement the `__init__` method**:

```python
def __init__(self):
super().__init__(
id="f5e7f8a9-1b2c-3d4e-5f6a-7b8c9d0e1f2b", # Unique ID for the node (must be a valid UUID: hex digits only)
input_schema=GetWikipediaSummary.Input, # Assign input schema
output_schema=GetWikipediaSummary.Output, # Assign output schema

# Provide sample input and output for testing the node

test_input={"topic": "Artificial Intelligence"},
test_output={"summary": "Artificial intelligence (AI) is intelligence demonstrated by machines, in contrast to the natural intelligence displayed by humans and animals."},
)
```

- `id`: A unique identifier for the node.
- `input_schema` and `output_schema`: Define the structure of the input and output data.
- `test_input` and `test_output`: Provide sample input and output data for testing the node.

5. **Implement the `run` method**, which contains the main logic of the node:

```python
def run(self, input_data: Input) -> BlockOutput:
try:
# Make the request to Wikipedia API
response = requests.get(f"https://en.wikipedia.org/api/rest_v1/page/summary/{input_data.topic}")
response.raise_for_status()
summary_data = response.json()

# Output the summary
yield "summary", summary_data['extract']

except requests.exceptions.HTTPError as http_err:
raise ValueError(f"HTTP error occurred: {http_err}")
except requests.RequestException as e:
raise ValueError(f"Request to Wikipedia API failed: {e}")
except KeyError as e:
raise ValueError(f"Error processing Wikipedia data: {e}")
```

- **Try block**: Contains the main logic to fetch and process the Wikipedia summary.
- **API request**: Send a GET request to the Wikipedia API.
- **Error handling**: Handle various exceptions that might occur during the API request and data processing.
- **Yield**: Use `yield` to output the results.

6. **Register the new node** by adding it to the `__init__.py` file in the `autogpt_server/blocks` directory. This step makes your new node available to the rest of the server.

- Open the `__init__.py` file in the `autogpt_server/blocks` directory.
- Add an import statement for your new node at the top of the file.
- Add the new node to the `AVAILABLE_BLOCKS` and `__all__` lists.

Example:

```python
from autogpt_server.blocks import sample, reddit, text, ai, wikipedia, discord, get_wikipedia_summary # Import your new node
from autogpt_server.data.block import Block

AVAILABLE_BLOCKS = {
block.id: block
for block in [v() for v in Block.__subclasses__()]
}

__all__ = ["ai", "sample", "reddit", "text", "AVAILABLE_BLOCKS", "wikipedia", "discord", "get_wikipedia_summary"]
```

- The import statement ensures your new node is included in the module.
- The `AVAILABLE_BLOCKS` dictionary includes all blocks by their ID.
- The `__all__` list specifies all public objects that the module exports.

### Full Code example

Here is the complete implementation of the `GetWikipediaSummary` node:

```python
import requests
from autogpt_server.data.block import Block, BlockSchema, BlockOutput

class GetWikipediaSummary(Block):
# Define the input schema with the required field 'topic'
class Input(BlockSchema):
topic: str # The topic to get the Wikipedia summary for

# Define the output schema with the field 'summary'
class Output(BlockSchema):
summary: str # The summary of the topic from Wikipedia

def __init__(self):
super().__init__(
id="f5e7f8a9-1b2c-3d4e-5f6a-7b8c9d0e1f2b", # Unique ID for the node (must be a valid UUID: hex digits only)
input_schema=GetWikipediaSummary.Input, # Assign input schema
output_schema=GetWikipediaSummary.Output, # Assign output schema

# Provide sample input and output for testing the node

test_input={"topic": "Artificial Intelligence"},
test_output={"summary": "Artificial intelligence (AI) is intelligence demonstrated by machines, in contrast to the natural intelligence displayed by humans and animals."},
)

def run(self, input_data: Input) -> BlockOutput:
try:
# Make the request to Wikipedia API
response = requests.get(f"https://en.wikipedia.org/api/rest_v1/page/summary/{input_data.topic}")
response.raise_for_status()
summary_data = response.json()

# Output the summary
yield "summary", summary_data['extract']

except requests.exceptions.HTTPError as http_err:
raise ValueError(f"HTTP error occurred: {http_err}")
except requests.RequestException as e:
raise ValueError(f"Request to Wikipedia API failed: {e}")
except KeyError as e:
raise ValueError(f"Error processing Wikipedia data: {e}")
```

## Key Points to Remember

- **Unique ID**: Give your node a unique ID in the `__init__` method.
- **Input and Output Schemas**: Define clear input and output schemas.
- **Error Handling**: Implement error handling in the `run` method.
- **Output Results**: Use `yield` to output results in the `run` method.
- **Register the Node**: Add your new node to the `__init__.py` file to make it available to the server.
- **Testing**: Provide test input and output in the `__init__` method for automatic testing.

By following these steps, you can create new nodes that extend the functionality of the AutoGPT Agent Server.
3 changes: 3 additions & 0 deletions docs/mkdocs.yml
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,9 @@ nav:
- Readme: https://github.com/Significant-Gravitas/AutoGPT/blob/master/frontend/README.md

- Docs: docs/index.md

- Server:
- Build your own Node: server/new_nodes.md

# - Challenges:
# - Introduction: challenges/introduction.md
Expand Down
4 changes: 2 additions & 2 deletions forge/forge/components/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ You can set sensitive variables in the `.json` file as well but it's recommended
"github_username": null
},
"ActionHistoryConfiguration": {
"model_name": "gpt-3.5-turbo",
"llm_name": "gpt-3.5-turbo",
"max_tokens": 1024,
"spacy_language_model": "en_core_web_sm"
},
Expand All @@ -129,7 +129,7 @@ You can set sensitive variables in the `.json` file as well but it's recommended
"duckduckgo_max_attempts": 3
},
"WebSeleniumConfiguration": {
"model_name": "gpt-3.5-turbo",
"llm_name": "gpt-3.5-turbo",
"web_browser": "chrome",
"headless": true,
"user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
Expand Down
4 changes: 2 additions & 2 deletions forge/forge/components/action_history/action_history.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@


class ActionHistoryConfiguration(BaseModel):
model_name: ModelName = OpenAIModelName.GPT3
llm_name: ModelName = OpenAIModelName.GPT3
"""Name of the llm model used to compress the history"""
max_tokens: int = 1024
"""Maximum number of tokens to use up with generated history messages"""
Expand Down Expand Up @@ -97,7 +97,7 @@ def after_parse(self, result: AnyProposal) -> None:
async def after_execute(self, result: ActionResult) -> None:
self.event_history.register_result(result)
await self.event_history.handle_compression(
self.llm_provider, self.config.model_name, self.config.spacy_language_model
self.llm_provider, self.config.llm_name, self.config.spacy_language_model
)

@staticmethod
Expand Down
8 changes: 4 additions & 4 deletions forge/forge/components/web/selenium.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ class BrowsingError(CommandExecutionError):


class WebSeleniumConfiguration(BaseModel):
model_name: ModelName = OpenAIModelName.GPT3
llm_name: ModelName = OpenAIModelName.GPT3
"""Name of the llm model used to read websites"""
web_browser: Literal["chrome", "firefox", "safari", "edge"] = "chrome"
"""Web browser used by Selenium"""
Expand Down Expand Up @@ -164,7 +164,7 @@ async def read_webpage(
elif get_raw_content:
if (
output_tokens := self.llm_provider.count_tokens(
text, self.config.model_name
text, self.config.llm_name
)
) > MAX_RAW_CONTENT_LENGTH:
oversize_factor = round(output_tokens / MAX_RAW_CONTENT_LENGTH, 1)
Expand Down Expand Up @@ -382,7 +382,7 @@ async def summarize_webpage(
text,
topics_of_interest=topics_of_interest,
llm_provider=self.llm_provider,
model_name=self.config.model_name,
model_name=self.config.llm_name,
spacy_model=self.config.browse_spacy_language_model,
)
return "\n".join(f"* {i}" for i in information)
Expand All @@ -391,7 +391,7 @@ async def summarize_webpage(
text,
question=question,
llm_provider=self.llm_provider,
model_name=self.config.model_name,
model_name=self.config.llm_name,
spacy_model=self.config.browse_spacy_language_model,
)
return result
2 changes: 1 addition & 1 deletion forge/forge/llm/prompting/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
class PromptStrategy(abc.ABC):
@property
@abc.abstractmethod
def model_classification(self) -> LanguageModelClassification:
def llm_classification(self) -> LanguageModelClassification:
...

@abc.abstractmethod
Expand Down
Loading

0 comments on commit f6a4d9f

Please sign in to comment.