feat(forge, agent, benchmark): Upgrade to Pydantic v2 (#7280)
Update Pydantic dependency of `autogpt`, `forge` and `benchmark` to `^2.7`
[Pydantic Migration Guide](https://docs.pydantic.dev/2.7/migration/)

- Migrate usages of now-deprecated functions to their replacements (a short v1-to-v2 sketch follows this list)
- Update `Field` definitions
  - Ellipsis `...` for required fields is deprecated
  - `Field` no longer supports extra `kwargs`; replace uses of this feature with field metadata
- Replace `Config` class for specifying model configuration with `model_config = ConfigDict(..)`
- Remove `ModelContainer` from `BaseAgent`; the component configuration dict is now serialized directly using Pydantic v2 helper functions
- Fork `agent-protocol` and update `packages/client/python` for Pydantic v2 support: https://github.com/Significant-Gravitas/agent-protocol
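
The list above maps onto a handful of recurring v1-to-v2 patterns. Below is a minimal, illustrative sketch assuming Pydantic `>=2.7`; `ExampleConfig` and its fields are hypothetical stand-ins, not code from this repository:

```python
from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator


class ExampleConfig(BaseModel):
    # v1: `Field(..., description=...)`; the ellipsis is no longer needed,
    # since a field without a default is already required.
    smart_llm: str = Field(description="Model used for reasoning")
    openai_functions: bool = Field(default=False, description="Use the tool-calling API")

    # v1: nested `class Config:`; v2 uses a `model_config` attribute instead.
    model_config = ConfigDict(extra="forbid")

    # v1: `@validator(...)` receiving `values: dict`; v2 uses `@field_validator`
    # with a `ValidationInfo` object exposing previously validated fields.
    @field_validator("openai_functions")
    @classmethod
    def check_functions_support(cls, value: bool, info: ValidationInfo) -> bool:
        if value and not info.data["smart_llm"].startswith("gpt-"):
            raise ValueError("openai_functions requires an OpenAI model")
        return value


config = ExampleConfig(smart_llm="gpt-4o", openai_functions=True)

# Renamed model helpers (v1 name, then v2 name):
data = config.model_dump()                     # was .dict()
json_str = config.model_dump_json()            # was .json()
clone = config.model_copy(deep=True)           # was .copy(deep=True)
restored = ExampleConfig.model_validate(data)  # was .parse_obj(data)
schema = ExampleConfig.model_json_schema()     # was .schema()
```

The v1 method names still exist in Pydantic v2 but emit deprecation warnings, which is why the diff below replaces them wholesale.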

---------

Co-authored-by: Reinier van der Leer <[email protected]>
kcze and Pwuts authored Jul 2, 2024
1 parent 8feaced commit 7cb4d4a
Showing 54 changed files with 993 additions and 605 deletions.
2 changes: 1 addition & 1 deletion autogpt/autogpt/agent_factory/configurators.py
@@ -104,5 +104,5 @@ def create_agent_state(
allow_fs_access=not app_config.restrict_to_workspace,
use_functions_api=app_config.openai_functions,
),
history=Agent.default_settings.history.copy(deep=True),
history=Agent.default_settings.history.model_copy(deep=True),
)
6 changes: 3 additions & 3 deletions autogpt/autogpt/agent_factory/profile_generator.py
@@ -137,7 +137,7 @@ class AgentProfileGeneratorConfiguration(SystemConfiguration):
required=True,
),
},
).dict()
).model_dump()
)


@@ -156,7 +156,7 @@ def __init__(
self._model_classification = model_classification
self._system_prompt_message = system_prompt
self._user_prompt_template = user_prompt_template
self._create_agent_function = CompletionModelFunction.parse_obj(
self._create_agent_function = CompletionModelFunction.model_validate(
create_agent_function
)

@@ -222,7 +222,7 @@ async def generate_agent_profile_for_task(
AIConfig: The AIConfig object tailored to the user's input
"""
agent_profile_generator = AgentProfileGenerator(
**AgentProfileGenerator.default_configuration.dict() # HACK
**AgentProfileGenerator.default_configuration.model_dump() # HACK
)

prompt = agent_profile_generator.build_prompt(task)
6 changes: 4 additions & 2 deletions autogpt/autogpt/agents/agent.py
@@ -100,7 +100,9 @@ def __init__(
super().__init__(settings)

self.llm_provider = llm_provider
prompt_config = OneShotAgentPromptStrategy.default_configuration.copy(deep=True)
prompt_config = OneShotAgentPromptStrategy.default_configuration.model_copy(
deep=True
)
prompt_config.use_functions_api = (
settings.config.use_functions_api
# Anthropic currently doesn't support tools + prefilling :(
@@ -160,7 +162,7 @@ async def propose_action(self) -> OneShotAgentActionProposal:
constraints = await self.run_pipeline(DirectiveProvider.get_constraints)
best_practices = await self.run_pipeline(DirectiveProvider.get_best_practices)

directives = self.state.directives.copy(deep=True)
directives = self.state.directives.model_copy(deep=True)
directives.resources += resources
directives.constraints += constraints
directives.best_practices += best_practices
24 changes: 13 additions & 11 deletions autogpt/autogpt/agents/prompt_strategies/one_shot.py
@@ -28,15 +28,13 @@

class AssistantThoughts(ModelWithSummary):
observations: str = Field(
..., description="Relevant observations from your last action (if any)"
description="Relevant observations from your last action (if any)"
)
text: str = Field(..., description="Thoughts")
reasoning: str = Field(..., description="Reasoning behind the thoughts")
self_criticism: str = Field(..., description="Constructive self-criticism")
plan: list[str] = Field(
..., description="Short list that conveys the long-term plan"
)
speak: str = Field(..., description="Summary of thoughts, to say to user")
text: str = Field(description="Thoughts")
reasoning: str = Field(description="Reasoning behind the thoughts")
self_criticism: str = Field(description="Constructive self-criticism")
plan: list[str] = Field(description="Short list that conveys the long-term plan")
speak: str = Field(description="Summary of thoughts, to say to user")

def summary(self) -> str:
return self.text
@@ -96,7 +94,9 @@ def __init__(
logger: Logger,
):
self.config = configuration
self.response_schema = JSONSchema.from_dict(OneShotAgentActionProposal.schema())
self.response_schema = JSONSchema.from_dict(
OneShotAgentActionProposal.model_json_schema()
)
self.logger = logger

@property
@@ -182,7 +182,7 @@ def build_system_prompt(
)

def response_format_instruction(self, use_functions_api: bool) -> tuple[str, str]:
response_schema = self.response_schema.copy(deep=True)
response_schema = self.response_schema.model_copy(deep=True)
assert response_schema.properties
if use_functions_api and "use_tool" in response_schema.properties:
del response_schema.properties["use_tool"]
@@ -274,6 +274,8 @@ def parse_response_content(
raise InvalidAgentResponseError("Assistant did not use a tool")
assistant_reply_dict["use_tool"] = response.tool_calls[0].function

parsed_response = OneShotAgentActionProposal.parse_obj(assistant_reply_dict)
parsed_response = OneShotAgentActionProposal.model_validate(
assistant_reply_dict
)
parsed_response.raw_message = response.copy()
return parsed_response
10 changes: 6 additions & 4 deletions autogpt/autogpt/app/agent_protocol_server.py
@@ -314,7 +314,7 @@ async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Ste
""
if tool_result is None
else (
orjson.loads(tool_result.json())
orjson.loads(tool_result.model_dump_json())
if not isinstance(tool_result, ActionErrorResult)
else {
"error": str(tool_result.error),
@@ -327,7 +327,7 @@ async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Ste
if last_proposal and tool_result
else {}
),
**assistant_response.dict(),
**assistant_response.model_dump(),
}

task_cumulative_cost = agent.llm_provider.get_incurred_cost()
@@ -451,15 +451,17 @@ def _get_task_llm_provider(self, task: Task, step_id: str = "") -> MultiProvider
"""
task_llm_budget = self._task_budgets[task.task_id]

task_llm_provider_config = self.llm_provider._configuration.copy(deep=True)
task_llm_provider_config = self.llm_provider._configuration.model_copy(
deep=True
)
_extra_request_headers = task_llm_provider_config.extra_request_headers
_extra_request_headers["AP-TaskID"] = task.task_id
if step_id:
_extra_request_headers["AP-StepID"] = step_id
if task.additional_input and (user_id := task.additional_input.get("user_id")):
_extra_request_headers["AutoGPT-UserID"] = user_id

settings = self.llm_provider._settings.copy()
settings = self.llm_provider._settings.model_copy()
settings.budget = task_llm_budget
settings.configuration = task_llm_provider_config
task_llm_provider = self.llm_provider.__class__(
14 changes: 7 additions & 7 deletions autogpt/autogpt/app/config.py
@@ -5,15 +5,15 @@
import os
import re
from pathlib import Path
from typing import Any, Optional, Union
from typing import Optional, Union

import forge
from forge.config.base import BaseConfig
from forge.llm.providers import CHAT_MODELS, ModelName
from forge.llm.providers.openai import OpenAICredentials, OpenAIModelName
from forge.logging.config import LoggingConfig
from forge.models.config import Configurable, UserConfigurable
from pydantic import SecretStr, validator
from pydantic import SecretStr, ValidationInfo, field_validator

logger = logging.getLogger(__name__)

@@ -91,15 +91,15 @@ class AppConfig(BaseConfig):
default=AZURE_CONFIG_FILE, from_env="AZURE_CONFIG_FILE"
)

@validator("openai_functions")
def validate_openai_functions(cls, v: bool, values: dict[str, Any]):
if v:
smart_llm = values["smart_llm"]
@field_validator("openai_functions")
def validate_openai_functions(cls, value: bool, info: ValidationInfo):
if value:
smart_llm = info.data["smart_llm"]
assert CHAT_MODELS[smart_llm].has_function_call_api, (
f"Model {smart_llm} does not support tool calling. "
"Please disable OPENAI_FUNCTIONS or choose a suitable model."
)
return v
return value


class ConfigBuilder(Configurable[AppConfig]):
