From 2c0286e41122154b248da8aec3271a301e0c330a Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Thu, 17 Oct 2024 16:51:10 +0300 Subject: [PATCH 01/24] feat(backend): Add credit for Jina/Search & LLM blocks (#8361) --- .../backend/backend/blocks/llm.py | 36 +++++++++---------- .../backend/backend/data/credit.py | 5 +++ 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index f38b5f5da72d..a55dbdf10565 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -96,25 +96,25 @@ def cost_factor(self) -> int: MODEL_METADATA = { - LlmModel.O1_PREVIEW: ModelMetadata("openai", 32000, cost_factor=60), - LlmModel.O1_MINI: ModelMetadata("openai", 62000, cost_factor=30), - LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000, cost_factor=10), - LlmModel.GPT4O: ModelMetadata("openai", 128000, cost_factor=12), - LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000, cost_factor=11), - LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, cost_factor=8), - LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000, cost_factor=14), - LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000, cost_factor=13), - LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192, cost_factor=6), - LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192, cost_factor=9), - LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768, cost_factor=7), - LlmModel.GEMMA_7B: ModelMetadata("groq", 8192, cost_factor=6), - LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, cost_factor=7), - LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192, cost_factor=10), + LlmModel.O1_PREVIEW: ModelMetadata("openai", 32000, cost_factor=16), + LlmModel.O1_MINI: ModelMetadata("openai", 62000, cost_factor=4), + LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000, cost_factor=1), + LlmModel.GPT4O: ModelMetadata("openai", 128000, cost_factor=3), + LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000, cost_factor=10), + LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, cost_factor=1), + LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000, cost_factor=4), + LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000, cost_factor=1), + LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192, cost_factor=1), + LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192, cost_factor=1), + LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768, cost_factor=1), + LlmModel.GEMMA_7B: ModelMetadata("groq", 8192, cost_factor=1), + LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, cost_factor=1), + LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192, cost_factor=1), # Limited to 16k during preview - LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072, cost_factor=15), - LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072, cost_factor=13), - LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, cost_factor=7), - LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, cost_factor=11), + LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072, cost_factor=1), + LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072, cost_factor=1), + LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, cost_factor=1), + LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, cost_factor=1), } for model in LlmModel: diff --git a/autogpt_platform/backend/backend/data/credit.py b/autogpt_platform/backend/backend/data/credit.py index e1fccb42897f..5581a7854226 100644 --- a/autogpt_platform/backend/backend/data/credit.py +++ 
b/autogpt_platform/backend/backend/data/credit.py @@ -17,6 +17,7 @@ AITextSummarizerBlock, LlmModel, ) +from backend.blocks.search import ExtractWebsiteContentBlock, SearchTheWebBlock from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock from backend.data.block import Block, BlockInput, get_block from backend.util.settings import Config @@ -74,6 +75,10 @@ def __init__( CreateTalkingAvatarVideoBlock: [ BlockCost(cost_amount=15, cost_filter={"api_key": None}) ], + SearchTheWebBlock: [BlockCost(cost_amount=1)], + ExtractWebsiteContentBlock: [ + BlockCost(cost_amount=1, cost_filter={"raw_content": False}) + ], } From 5d4d2486da1d74863a4f41b4ab57fcd9e2134d38 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Thu, 17 Oct 2024 18:15:15 +0200 Subject: [PATCH 02/24] ci: Enforce `dev` as base branch for development (#8369) * Create repo-pr-enforce-base-branch.yml * fix quotes * test * fix github token * fix trigger and CLI config * change back trigger because otherwise I can't test it * fix the fix * fix repo selection * fix perms? * fix quotes and newlines escaping in message * Update repo-pr-enforce-base-branch.yml * grrr escape sequences in bash * test * clean up --------- Co-authored-by: Aarushi <50577581+aarushik93@users.noreply.github.com> --- .../workflows/repo-pr-enforce-base-branch.yml | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/repo-pr-enforce-base-branch.yml diff --git a/.github/workflows/repo-pr-enforce-base-branch.yml b/.github/workflows/repo-pr-enforce-base-branch.yml new file mode 100644 index 000000000000..3d4bd9096a52 --- /dev/null +++ b/.github/workflows/repo-pr-enforce-base-branch.yml @@ -0,0 +1,21 @@ +name: Repo - Enforce dev as base branch +on: + pull_request_target: + branches: [ master ] + types: [ opened ] + +jobs: + check_pr_target: + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Check if PR is from dev or hotfix + if: ${{ !(startsWith(github.event.pull_request.head.ref, 'hotfix/') || github.event.pull_request.head.ref == 'dev') }} + run: | + gh pr comment ${{ github.event.number }} --repo "$REPO" \ + --body $'This PR targets the `master` branch but does not come from `dev` or a `hotfix/*` branch.\n\nAutomatically setting the base branch to `dev`.' + gh pr edit ${{ github.event.number }} --base dev --repo "$REPO" + env: + GITHUB_TOKEN: ${{ github.token }} + REPO: ${{ github.repository }} From 7f6354caaee4f1fed045f2b60b4eaba42db367ae Mon Sep 17 00:00:00 2001 From: Kushal Agrawal <98145879+kushal34712@users.noreply.github.com> Date: Thu, 17 Oct 2024 21:56:35 +0530 Subject: [PATCH 03/24] Update README.md (#8319) * Update README.md * Update README.md --------- Co-authored-by: Aarushi <50577581+aarushik93@users.noreply.github.com> --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 2371fa39f8d0..722da4223019 100644 --- a/README.md +++ b/README.md @@ -158,6 +158,8 @@ To maintain a uniform standard and ensure seamless compatibility with many curre --- +## Stars stats +
 [star history chart]
@@ -167,3 +169,10 @@ To maintain a uniform standard and ensure seamless compatibility with many curre

+ + +## ⚡ Contributors + + + Contributors + From 26b1bca03391172f1bc5bfc6d94aaf2010c7edf3 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Fri, 18 Oct 2024 06:22:05 +0300 Subject: [PATCH 04/24] refactor(backend): Make block fields consistently use SchemaField (#8360) --- .../backend/backend/blocks/__init__.py | 11 +++ .../blocks/ai_shortform_video_block.py | 15 ++-- .../backend/backend/blocks/basic.py | 21 ++--- .../backend/backend/blocks/block.py | 13 ++- .../backend/backend/blocks/csv.py | 50 ++++++++--- .../backend/backend/blocks/discord.py | 21 +++-- .../backend/backend/blocks/email_block.py | 8 +- .../backend/backend/blocks/http.py | 27 ++++-- .../backend/backend/blocks/ideogram.py | 6 -- .../backend/backend/blocks/llm.py | 81 ++++++++++++++---- .../backend/backend/blocks/medium.py | 15 +++- .../backend/backend/blocks/reddit.py | 22 ++--- .../backend/backend/blocks/search.py | 47 +++++++---- .../backend/backend/blocks/talking_head.py | 3 +- .../backend/backend/blocks/text.py | 52 +++++++----- .../backend/backend/blocks/time_blocks.py | 83 ++++++++++++++----- .../backend/backend/blocks/youtube.py | 7 +- .../backend/test/util/test_service.py | 6 +- 18 files changed, 338 insertions(+), 150 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/__init__.py b/autogpt_platform/backend/backend/blocks/__init__.py index 1fd85aef4630..940956a20b6f 100644 --- a/autogpt_platform/backend/backend/blocks/__init__.py +++ b/autogpt_platform/backend/backend/blocks/__init__.py @@ -69,6 +69,17 @@ def all_subclasses(clz): f"{block.name} `error` field in output_schema must be a string" ) + # Make sure all fields in input_schema and output_schema are annotated and has a value + for field_name, field in [*input_schema.items(), *output_schema.items()]: + if field.annotation is None: + raise ValueError( + f"{block.name} has a field {field_name} that is not annotated" + ) + if field.json_schema_extra is None: + raise ValueError( + f"{block.name} has a field {field_name} not defined as SchemaField" + ) + for field in block.input_schema.model_fields.values(): if field.annotation is bool and field.default not in (True, False): raise ValueError(f"{block.name} has a boolean field with no default value") diff --git a/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py index 127bb3ae8b4a..3fe92950c199 100644 --- a/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py +++ b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py @@ -3,7 +3,6 @@ from enum import Enum import requests -from pydantic import Field from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import BlockSecret, SchemaField, SecretField @@ -129,9 +128,13 @@ class Input(BlockSchema): description="""1. Use short and punctuated sentences\n\n2. Use linebreaks to create a new clip\n\n3. Text outside of brackets is spoken by the AI, and [text between brackets] will be used to guide the visual generation. 
For example, [close-up of a cat] will show a close-up of a cat.""", placeholder="[close-up of a cat] Meow!", ) - ratio: str = Field(description="Aspect ratio of the video", default="9 / 16") - resolution: str = Field(description="Resolution of the video", default="720p") - frame_rate: int = Field(description="Frame rate of the video", default=60) + ratio: str = SchemaField( + description="Aspect ratio of the video", default="9 / 16" + ) + resolution: str = SchemaField( + description="Resolution of the video", default="720p" + ) + frame_rate: int = SchemaField(description="Frame rate of the video", default=60) generation_preset: GenerationPreset = SchemaField( description="Generation preset for visual style - only effects AI generated visuals", default=GenerationPreset.LEONARDO, @@ -154,8 +157,8 @@ class Input(BlockSchema): ) class Output(BlockSchema): - video_url: str = Field(description="The URL of the created video") - error: str = Field(description="Error message if the request failed") + video_url: str = SchemaField(description="The URL of the created video") + error: str = SchemaField(description="Error message if the request failed") def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/basic.py b/autogpt_platform/backend/backend/blocks/basic.py index 60992e0f4591..391d6b615aad 100644 --- a/autogpt_platform/backend/backend/blocks/basic.py +++ b/autogpt_platform/backend/backend/blocks/basic.py @@ -2,7 +2,6 @@ from typing import Any, List from jinja2 import BaseLoader, Environment -from pydantic import Field from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType from backend.data.model import SchemaField @@ -19,18 +18,18 @@ class StoreValueBlock(Block): """ class Input(BlockSchema): - input: Any = Field( + input: Any = SchemaField( description="Trigger the block to produce the output. " "The value is only used when `data` is None." ) - data: Any = Field( + data: Any = SchemaField( description="The constant data to be retained in the block. 
" "This value is passed as `output`.", default=None, ) class Output(BlockSchema): - output: Any + output: Any = SchemaField(description="The stored data retained in the block.") def __init__(self): super().__init__( @@ -56,10 +55,10 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class PrintToConsoleBlock(Block): class Input(BlockSchema): - text: str + text: str = SchemaField(description="The text to print to the console.") class Output(BlockSchema): - status: str + status: str = SchemaField(description="The status of the print operation.") def __init__(self): super().__init__( @@ -79,12 +78,14 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class FindInDictionaryBlock(Block): class Input(BlockSchema): - input: Any = Field(description="Dictionary to lookup from") - key: str | int = Field(description="Key to lookup in the dictionary") + input: Any = SchemaField(description="Dictionary to lookup from") + key: str | int = SchemaField(description="Key to lookup in the dictionary") class Output(BlockSchema): - output: Any = Field(description="Value found for the given key") - missing: Any = Field(description="Value of the input that missing the key") + output: Any = SchemaField(description="Value found for the given key") + missing: Any = SchemaField( + description="Value of the input that missing the key" + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/block.py b/autogpt_platform/backend/backend/blocks/block.py index a4bf8f6ac524..01e8af7238ea 100644 --- a/autogpt_platform/backend/backend/blocks/block.py +++ b/autogpt_platform/backend/backend/blocks/block.py @@ -3,6 +3,7 @@ from typing import Type from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField class BlockInstallationBlock(Block): @@ -15,11 +16,17 @@ class BlockInstallationBlock(Block): """ class Input(BlockSchema): - code: str + code: str = SchemaField( + description="Python code of the block to be installed", + ) class Output(BlockSchema): - success: str - error: str + success: str = SchemaField( + description="Success message if the block is installed successfully", + ) + error: str = SchemaField( + description="Error message if the block installation fails", + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/csv.py b/autogpt_platform/backend/backend/blocks/csv.py index b53f6c5ac889..e78c8994737a 100644 --- a/autogpt_platform/backend/backend/blocks/csv.py +++ b/autogpt_platform/backend/backend/blocks/csv.py @@ -1,21 +1,49 @@ from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema -from backend.data.model import ContributorDetails +from backend.data.model import ContributorDetails, SchemaField class ReadCsvBlock(Block): class Input(BlockSchema): - contents: str - delimiter: str = "," - quotechar: str = '"' - escapechar: str = "\\" - has_header: bool = True - skip_rows: int = 0 - strip: bool = True - skip_columns: list[str] = [] + contents: str = SchemaField( + description="The contents of the CSV file to read", + placeholder="a, b, c\n1,2,3\n4,5,6", + ) + delimiter: str = SchemaField( + description="The delimiter used in the CSV file", + default=",", + ) + quotechar: str = SchemaField( + description="The character used to quote fields", + default='"', + ) + escapechar: str = SchemaField( + description="The character used to escape the delimiter", + default="\\", + ) + has_header: bool = SchemaField( + description="Whether the CSV 
file has a header row", + default=True, + ) + skip_rows: int = SchemaField( + description="The number of rows to skip from the start of the file", + default=0, + ) + strip: bool = SchemaField( + description="Whether to strip whitespace from the values", + default=True, + ) + skip_columns: list[str] = SchemaField( + description="The columns to skip from the start of the row", + default=[], + ) class Output(BlockSchema): - row: dict[str, str] - all_data: list[dict[str, str]] + row: dict[str, str] = SchemaField( + description="The data produced from each row in the CSV file" + ) + all_data: list[dict[str, str]] = SchemaField( + description="All the data in the CSV file as a list of rows" + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/discord.py b/autogpt_platform/backend/backend/blocks/discord.py index cd350a619916..e5414cd32727 100644 --- a/autogpt_platform/backend/backend/blocks/discord.py +++ b/autogpt_platform/backend/backend/blocks/discord.py @@ -2,10 +2,9 @@ import aiohttp import discord -from pydantic import Field from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema -from backend.data.model import BlockSecret, SecretField +from backend.data.model import BlockSecret, SchemaField, SecretField class ReadDiscordMessagesBlock(Block): @@ -13,16 +12,18 @@ class Input(BlockSchema): discord_bot_token: BlockSecret = SecretField( key="discord_bot_token", description="Discord bot token" ) - continuous_read: bool = Field( + continuous_read: bool = SchemaField( description="Whether to continuously read messages", default=True ) class Output(BlockSchema): - message_content: str = Field(description="The content of the message received") - channel_name: str = Field( + message_content: str = SchemaField( + description="The content of the message received" + ) + channel_name: str = SchemaField( description="The name of the channel the message was received from" ) - username: str = Field( + username: str = SchemaField( description="The username of the user who sent the message" ) @@ -134,13 +135,15 @@ class Input(BlockSchema): discord_bot_token: BlockSecret = SecretField( key="discord_bot_token", description="Discord bot token" ) - message_content: str = Field(description="The content of the message received") - channel_name: str = Field( + message_content: str = SchemaField( + description="The content of the message received" + ) + channel_name: str = SchemaField( description="The name of the channel the message was received from" ) class Output(BlockSchema): - status: str = Field( + status: str = SchemaField( description="The status of the operation (e.g., 'Message sent', 'Error')" ) diff --git a/autogpt_platform/backend/backend/blocks/email_block.py b/autogpt_platform/backend/backend/blocks/email_block.py index a7f0f82dcee7..79accb6d7d35 100644 --- a/autogpt_platform/backend/backend/blocks/email_block.py +++ b/autogpt_platform/backend/backend/blocks/email_block.py @@ -2,17 +2,17 @@ from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import BlockSecret, SchemaField, SecretField class EmailCredentials(BaseModel): - smtp_server: str = Field( + smtp_server: str = SchemaField( default="smtp.gmail.com", description="SMTP server address" ) - smtp_port: int = Field(default=25, description="SMTP port number") + 
smtp_port: int = SchemaField(default=25, description="SMTP port number") smtp_username: BlockSecret = SecretField(key="smtp_username") smtp_password: BlockSecret = SecretField(key="smtp_password") @@ -30,7 +30,7 @@ class Input(BlockSchema): body: str = SchemaField( description="Body of the email", placeholder="Enter the email body" ) - creds: EmailCredentials = Field( + creds: EmailCredentials = SchemaField( description="SMTP credentials", default=EmailCredentials(), ) diff --git a/autogpt_platform/backend/backend/blocks/http.py b/autogpt_platform/backend/backend/blocks/http.py index 04d893f84701..74a1d3d0bb0b 100644 --- a/autogpt_platform/backend/backend/blocks/http.py +++ b/autogpt_platform/backend/backend/blocks/http.py @@ -4,6 +4,7 @@ import requests from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField class HttpMethod(Enum): @@ -18,15 +19,27 @@ class HttpMethod(Enum): class SendWebRequestBlock(Block): class Input(BlockSchema): - url: str - method: HttpMethod = HttpMethod.POST - headers: dict[str, str] = {} - body: object = {} + url: str = SchemaField( + description="The URL to send the request to", + placeholder="https://api.example.com", + ) + method: HttpMethod = SchemaField( + description="The HTTP method to use for the request", + default=HttpMethod.POST, + ) + headers: dict[str, str] = SchemaField( + description="The headers to include in the request", + default={}, + ) + body: object = SchemaField( + description="The body of the request", + default={}, + ) class Output(BlockSchema): - response: object - client_error: object - server_error: object + response: object = SchemaField(description="The response from the server") + client_error: object = SchemaField(description="The error on 4xx status codes") + server_error: object = SchemaField(description="The error on 5xx status codes") def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/ideogram.py b/autogpt_platform/backend/backend/blocks/ideogram.py index 66dd22061447..6818a25371e2 100644 --- a/autogpt_platform/backend/backend/blocks/ideogram.py +++ b/autogpt_platform/backend/backend/blocks/ideogram.py @@ -75,28 +75,24 @@ class Input(BlockSchema): description="The name of the Image Generation Model, e.g., V_2", default=IdeogramModelName.V2, title="Image Generation Model", - enum=IdeogramModelName, advanced=False, ) aspect_ratio: AspectRatio = SchemaField( description="Aspect ratio for the generated image", default=AspectRatio.ASPECT_1_1, title="Aspect Ratio", - enum=AspectRatio, advanced=False, ) upscale: UpscaleOption = SchemaField( description="Upscale the generated image", default=UpscaleOption.NO_UPSCALE, title="Upscale Image", - enum=UpscaleOption, advanced=False, ) magic_prompt_option: MagicPromptOption = SchemaField( description="Whether to use MagicPrompt for enhancing the request", default=MagicPromptOption.AUTO, title="Magic Prompt Option", - enum=MagicPromptOption, advanced=True, ) seed: Optional[int] = SchemaField( @@ -109,7 +105,6 @@ class Input(BlockSchema): description="Style type to apply, applicable for V_2 and above", default=StyleType.AUTO, title="Style Type", - enum=StyleType, advanced=True, ) negative_prompt: Optional[str] = SchemaField( @@ -122,7 +117,6 @@ class Input(BlockSchema): description="Color palette preset name, choose 'None' to skip", default=ColorPalettePreset.NONE, title="Color Palette Preset", - enum=ColorPalettePreset, advanced=True, ) diff --git 
a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index a55dbdf10565..daec5b55d392 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -124,7 +124,10 @@ def cost_factor(self) -> int: class AIStructuredResponseGeneratorBlock(Block): class Input(BlockSchema): - prompt: str + prompt: str = SchemaField( + description="The prompt to send to the language model.", + placeholder="Enter your prompt here...", + ) expected_format: dict[str, str] = SchemaField( description="Expected format of the response. If provided, the response will be validated against this format. " "The keys should be the expected fields in the response, and the values should be the description of the field.", @@ -136,15 +139,25 @@ class Input(BlockSchema): advanced=False, ) api_key: BlockSecret = SecretField(value="") - sys_prompt: str = "" - retry: int = 3 + sys_prompt: str = SchemaField( + title="System Prompt", + default="", + description="The system prompt to provide additional context to the model.", + ) + retry: int = SchemaField( + title="Retry Count", + default=3, + description="Number of times to retry the LLM call if the response does not match the expected format.", + ) prompt_values: dict[str, str] = SchemaField( advanced=False, default={}, description="Values used to fill in the prompt." ) class Output(BlockSchema): - response: dict[str, Any] - error: str + response: dict[str, Any] = SchemaField( + description="The response object generated by the language model." + ) + error: str = SchemaField(description="Error message if the API call failed.") def __init__(self): super().__init__( @@ -349,7 +362,10 @@ def parse_response(resp: str) -> tuple[dict[str, Any], str | None]: class AITextGeneratorBlock(Block): class Input(BlockSchema): - prompt: str + prompt: str = SchemaField( + description="The prompt to send to the language model.", + placeholder="Enter your prompt here...", + ) model: LlmModel = SchemaField( title="LLM Model", default=LlmModel.GPT4_TURBO, @@ -357,15 +373,25 @@ class Input(BlockSchema): advanced=False, ) api_key: BlockSecret = SecretField(value="") - sys_prompt: str = "" - retry: int = 3 + sys_prompt: str = SchemaField( + title="System Prompt", + default="", + description="The system prompt to provide additional context to the model.", + ) + retry: int = SchemaField( + title="Retry Count", + default=3, + description="Number of times to retry the LLM call if the response does not match the expected format.", + ) prompt_values: dict[str, str] = SchemaField( advanced=False, default={}, description="Values used to fill in the prompt." ) class Output(BlockSchema): - response: str - error: str + response: str = SchemaField( + description="The response generated by the language model." 
+ ) + error: str = SchemaField(description="Error message if the API call failed.") def __init__(self): super().__init__( @@ -406,22 +432,43 @@ class SummaryStyle(Enum): class AITextSummarizerBlock(Block): class Input(BlockSchema): - text: str + text: str = SchemaField( + description="The text to summarize.", + placeholder="Enter the text to summarize here...", + ) model: LlmModel = SchemaField( title="LLM Model", default=LlmModel.GPT4_TURBO, description="The language model to use for summarizing the text.", ) - focus: str = "general information" - style: SummaryStyle = SummaryStyle.CONCISE + focus: str = SchemaField( + title="Focus", + default="general information", + description="The topic to focus on in the summary", + ) + style: SummaryStyle = SchemaField( + title="Summary Style", + default=SummaryStyle.CONCISE, + description="The style of the summary to generate.", + ) api_key: BlockSecret = SecretField(value="") # TODO: Make this dynamic - max_tokens: int = 4000 # Adjust based on the model's context window - chunk_overlap: int = 100 # Overlap between chunks to maintain context + max_tokens: int = SchemaField( + title="Max Tokens", + default=4096, + description="The maximum number of tokens to generate in the chat completion.", + ge=1, + ) + chunk_overlap: int = SchemaField( + title="Chunk Overlap", + default=100, + description="The number of overlapping tokens between chunks to maintain context.", + ge=0, + ) class Output(BlockSchema): - summary: str - error: str + summary: str = SchemaField(description="The final summary of the text.") + error: str = SchemaField(description="Error message if the API call failed.") def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/medium.py b/autogpt_platform/backend/backend/blocks/medium.py index 04ebe8fab012..1d85e0978082 100644 --- a/autogpt_platform/backend/backend/blocks/medium.py +++ b/autogpt_platform/backend/backend/blocks/medium.py @@ -1,3 +1,4 @@ +from enum import Enum from typing import List import requests @@ -6,6 +7,12 @@ from backend.data.model import BlockSecret, SchemaField, SecretField +class PublishToMediumStatus(str, Enum): + PUBLIC = "public" + DRAFT = "draft" + UNLISTED = "unlisted" + + class PublishToMediumBlock(Block): class Input(BlockSchema): author_id: BlockSecret = SecretField( @@ -34,9 +41,9 @@ class Input(BlockSchema): description="The original home of this content, if it was originally published elsewhere", placeholder="https://yourblog.com/original-post", ) - publish_status: str = SchemaField( - description="The publish status: 'public', 'draft', or 'unlisted'", - placeholder="public", + publish_status: PublishToMediumStatus = SchemaField( + description="The publish status", + placeholder=PublishToMediumStatus.DRAFT, ) license: str = SchemaField( default="all-rights-reserved", @@ -79,7 +86,7 @@ def __init__(self): "tags": ["test", "automation"], "license": "all-rights-reserved", "notify_followers": False, - "publish_status": "draft", + "publish_status": PublishToMediumStatus.DRAFT.value, "api_key": "your_test_api_key", }, test_output=[ diff --git a/autogpt_platform/backend/backend/blocks/reddit.py b/autogpt_platform/backend/backend/blocks/reddit.py index 065436ae7321..9e4f3f3aca0b 100644 --- a/autogpt_platform/backend/backend/blocks/reddit.py +++ b/autogpt_platform/backend/backend/blocks/reddit.py @@ -2,10 +2,10 @@ from typing import Iterator import praw -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict from backend.data.block import 
Block, BlockCategory, BlockOutput, BlockSchema -from backend.data.model import BlockSecret, SecretField +from backend.data.model import BlockSecret, SchemaField, SecretField from backend.util.mock import MockObject @@ -48,25 +48,25 @@ def get_praw(creds: RedditCredentials) -> praw.Reddit: class GetRedditPostsBlock(Block): class Input(BlockSchema): - subreddit: str = Field(description="Subreddit name") - creds: RedditCredentials = Field( + subreddit: str = SchemaField(description="Subreddit name") + creds: RedditCredentials = SchemaField( description="Reddit credentials", default=RedditCredentials(), ) - last_minutes: int | None = Field( + last_minutes: int | None = SchemaField( description="Post time to stop minutes ago while fetching posts", default=None, ) - last_post: str | None = Field( + last_post: str | None = SchemaField( description="Post ID to stop when reached while fetching posts", default=None, ) - post_limit: int | None = Field( + post_limit: int | None = SchemaField( description="Number of posts to fetch", default=10 ) class Output(BlockSchema): - post: RedditPost = Field(description="Reddit post") + post: RedditPost = SchemaField(description="Reddit post") def __init__(self): super().__init__( @@ -140,13 +140,13 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class PostRedditCommentBlock(Block): class Input(BlockSchema): - creds: RedditCredentials = Field( + creds: RedditCredentials = SchemaField( description="Reddit credentials", default=RedditCredentials() ) - data: RedditComment = Field(description="Reddit comment") + data: RedditComment = SchemaField(description="Reddit comment") class Output(BlockSchema): - comment_id: str + comment_id: str = SchemaField(description="Posted comment ID") def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/search.py b/autogpt_platform/backend/backend/blocks/search.py index ecd63e2ee658..27a4322ce6a7 100644 --- a/autogpt_platform/backend/backend/blocks/search.py +++ b/autogpt_platform/backend/backend/blocks/search.py @@ -17,11 +17,13 @@ def get_request(cls, url: str, json=False) -> Any: class GetWikipediaSummaryBlock(Block, GetRequest): class Input(BlockSchema): - topic: str + topic: str = SchemaField(description="The topic to fetch the summary for") class Output(BlockSchema): - summary: str - error: str + summary: str = SchemaField(description="The summary of the given topic") + error: str = SchemaField( + description="Error message if the summary cannot be retrieved" + ) def __init__(self): super().__init__( @@ -46,11 +48,13 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class SearchTheWebBlock(Block, GetRequest): class Input(BlockSchema): - query: str # The search query + query: str = SchemaField(description="The search query to search the web for") class Output(BlockSchema): - results: str # The search results including content from top 5 URLs - error: str # Error message if the search fails + results: str = SchemaField( + description="The search results including content from top 5 URLs" + ) + error: str = SchemaField(description="Error message if the search fails") def __init__(self): super().__init__( @@ -80,7 +84,7 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class ExtractWebsiteContentBlock(Block, GetRequest): class Input(BlockSchema): - url: str # The URL to scrape + url: str = SchemaField(description="The URL to scrape the content from") raw_content: bool = SchemaField( default=False, title="Raw Content", @@ -89,8 +93,10 @@ class 
Input(BlockSchema): ) class Output(BlockSchema): - content: str # The scraped content from the URL - error: str + content: str = SchemaField(description="The scraped content from the given URL") + error: str = SchemaField( + description="Error message if the content cannot be retrieved" + ) def __init__(self): super().__init__( @@ -116,15 +122,26 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class GetWeatherInformationBlock(Block, GetRequest): class Input(BlockSchema): - location: str + location: str = SchemaField( + description="Location to get weather information for" + ) api_key: BlockSecret = SecretField(key="openweathermap_api_key") - use_celsius: bool = True + use_celsius: bool = SchemaField( + default=True, + description="Whether to use Celsius or Fahrenheit for temperature", + ) class Output(BlockSchema): - temperature: str - humidity: str - condition: str - error: str + temperature: str = SchemaField( + description="Temperature in the specified location" + ) + humidity: str = SchemaField(description="Humidity in the specified location") + condition: str = SchemaField( + description="Weather condition in the specified location" + ) + error: str = SchemaField( + description="Error message if the weather information cannot be retrieved" + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/talking_head.py b/autogpt_platform/backend/backend/blocks/talking_head.py index e93b69ed8547..f4497d85ffab 100644 --- a/autogpt_platform/backend/backend/blocks/talking_head.py +++ b/autogpt_platform/backend/backend/blocks/talking_head.py @@ -13,7 +13,8 @@ class Input(BlockSchema): key="did_api_key", description="D-ID API Key" ) script_input: str = SchemaField( - description="The text input for the script", default="Welcome to AutoGPT" + description="The text input for the script", + placeholder="Welcome to AutoGPT", ) provider: Literal["microsoft", "elevenlabs", "amazon"] = SchemaField( description="The voice provider to use", default="microsoft" diff --git a/autogpt_platform/backend/backend/blocks/text.py b/autogpt_platform/backend/backend/blocks/text.py index da287b94fa5f..1d8c050fa147 100644 --- a/autogpt_platform/backend/backend/blocks/text.py +++ b/autogpt_platform/backend/backend/blocks/text.py @@ -2,9 +2,9 @@ from typing import Any from jinja2 import BaseLoader, Environment -from pydantic import Field from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField from backend.util import json jinja = Environment(loader=BaseLoader()) @@ -12,15 +12,17 @@ class MatchTextPatternBlock(Block): class Input(BlockSchema): - text: Any = Field(description="Text to match") - match: str = Field(description="Pattern (Regex) to match") - data: Any = Field(description="Data to be forwarded to output") - case_sensitive: bool = Field(description="Case sensitive match", default=True) - dot_all: bool = Field(description="Dot matches all", default=True) + text: Any = SchemaField(description="Text to match") + match: str = SchemaField(description="Pattern (Regex) to match") + data: Any = SchemaField(description="Data to be forwarded to output") + case_sensitive: bool = SchemaField( + description="Case sensitive match", default=True + ) + dot_all: bool = SchemaField(description="Dot matches all", default=True) class Output(BlockSchema): - positive: Any = Field(description="Output data if match is found") - negative: Any = Field(description="Output data if match is not found") + positive: Any = 
SchemaField(description="Output data if match is found") + negative: Any = SchemaField(description="Output data if match is not found") def __init__(self): super().__init__( @@ -64,15 +66,17 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class ExtractTextInformationBlock(Block): class Input(BlockSchema): - text: Any = Field(description="Text to parse") - pattern: str = Field(description="Pattern (Regex) to parse") - group: int = Field(description="Group number to extract", default=0) - case_sensitive: bool = Field(description="Case sensitive match", default=True) - dot_all: bool = Field(description="Dot matches all", default=True) + text: Any = SchemaField(description="Text to parse") + pattern: str = SchemaField(description="Pattern (Regex) to parse") + group: int = SchemaField(description="Group number to extract", default=0) + case_sensitive: bool = SchemaField( + description="Case sensitive match", default=True + ) + dot_all: bool = SchemaField(description="Dot matches all", default=True) class Output(BlockSchema): - positive: str = Field(description="Extracted text") - negative: str = Field(description="Original text") + positive: str = SchemaField(description="Extracted text") + negative: str = SchemaField(description="Original text") def __init__(self): super().__init__( @@ -116,11 +120,15 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class FillTextTemplateBlock(Block): class Input(BlockSchema): - values: dict[str, Any] = Field(description="Values (dict) to be used in format") - format: str = Field(description="Template to format the text using `values`") + values: dict[str, Any] = SchemaField( + description="Values (dict) to be used in format" + ) + format: str = SchemaField( + description="Template to format the text using `values`" + ) class Output(BlockSchema): - output: str + output: str = SchemaField(description="Formatted text") def __init__(self): super().__init__( @@ -155,11 +163,13 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class CombineTextsBlock(Block): class Input(BlockSchema): - input: list[str] = Field(description="text input to combine") - delimiter: str = Field(description="Delimiter to combine texts", default="") + input: list[str] = SchemaField(description="text input to combine") + delimiter: str = SchemaField( + description="Delimiter to combine texts", default="" + ) class Output(BlockSchema): - output: str = Field(description="Combined text") + output: str = SchemaField(description="Combined text") def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/time_blocks.py b/autogpt_platform/backend/backend/blocks/time_blocks.py index 9e95d428b9cc..eb886b5352c8 100644 --- a/autogpt_platform/backend/backend/blocks/time_blocks.py +++ b/autogpt_platform/backend/backend/blocks/time_blocks.py @@ -3,14 +3,22 @@ from typing import Any, Union from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField class GetCurrentTimeBlock(Block): class Input(BlockSchema): - trigger: str + trigger: str = SchemaField( + description="Trigger any data to output the current time" + ) + format: str = SchemaField( + description="Format of the time to output", default="%H:%M:%S" + ) class Output(BlockSchema): - time: str + time: str = SchemaField( + description="Current time in the specified format (default: %H:%M:%S)" + ) def __init__(self): super().__init__( @@ -20,25 +28,38 @@ def __init__(self): input_schema=GetCurrentTimeBlock.Input, 
output_schema=GetCurrentTimeBlock.Output, test_input=[ - {"trigger": "Hello", "format": "{time}"}, + {"trigger": "Hello"}, + {"trigger": "Hello", "format": "%H:%M"}, ], test_output=[ ("time", lambda _: time.strftime("%H:%M:%S")), + ("time", lambda _: time.strftime("%H:%M")), ], ) def run(self, input_data: Input, **kwargs) -> BlockOutput: - current_time = time.strftime("%H:%M:%S") + current_time = time.strftime(input_data.format) yield "time", current_time class GetCurrentDateBlock(Block): class Input(BlockSchema): - trigger: str - offset: Union[int, str] + trigger: str = SchemaField( + description="Trigger any data to output the current date" + ) + offset: Union[int, str] = SchemaField( + title="Days Offset", + description="Offset in days from the current date", + default=0, + ) + format: str = SchemaField( + description="Format of the date to output", default="%Y-%m-%d" + ) class Output(BlockSchema): - date: str + date: str = SchemaField( + description="Current date in the specified format (default: YYYY-MM-DD)" + ) def __init__(self): super().__init__( @@ -48,7 +69,8 @@ def __init__(self): input_schema=GetCurrentDateBlock.Input, output_schema=GetCurrentDateBlock.Output, test_input=[ - {"trigger": "Hello", "format": "{date}", "offset": "7"}, + {"trigger": "Hello", "offset": "7"}, + {"trigger": "Hello", "offset": "7", "format": "%m/%d/%Y"}, ], test_output=[ ( @@ -56,6 +78,12 @@ def __init__(self): lambda t: abs(datetime.now() - datetime.strptime(t, "%Y-%m-%d")) < timedelta(days=8), # 7 days difference + 1 day error margin. ), + ( + "date", + lambda t: abs(datetime.now() - datetime.strptime(t, "%m/%d/%Y")) + < timedelta(days=8), + # 7 days difference + 1 day error margin. + ), ], ) @@ -65,15 +93,23 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: except ValueError: offset = 0 current_date = datetime.now() - timedelta(days=offset) - yield "date", current_date.strftime("%Y-%m-%d") + yield "date", current_date.strftime(input_data.format) class GetCurrentDateAndTimeBlock(Block): class Input(BlockSchema): - trigger: str + trigger: str = SchemaField( + description="Trigger any data to output the current date and time" + ) + format: str = SchemaField( + description="Format of the date and time to output", + default="%Y-%m-%d %H:%M:%S", + ) class Output(BlockSchema): - date_time: str + date_time: str = SchemaField( + description="Current date and time in the specified format (default: YYYY-MM-DD HH:MM:SS)" + ) def __init__(self): super().__init__( @@ -83,7 +119,7 @@ def __init__(self): input_schema=GetCurrentDateAndTimeBlock.Input, output_schema=GetCurrentDateAndTimeBlock.Output, test_input=[ - {"trigger": "Hello", "format": "{date_time}"}, + {"trigger": "Hello"}, ], test_output=[ ( @@ -97,20 +133,29 @@ def __init__(self): ) def run(self, input_data: Input, **kwargs) -> BlockOutput: - current_date_time = time.strftime("%Y-%m-%d %H:%M:%S") + current_date_time = time.strftime(input_data.format) yield "date_time", current_date_time class CountdownTimerBlock(Block): class Input(BlockSchema): - input_message: Any = "timer finished" - seconds: Union[int, str] = 0 - minutes: Union[int, str] = 0 - hours: Union[int, str] = 0 - days: Union[int, str] = 0 + input_message: Any = SchemaField( + description="Message to output after the timer finishes", + default="timer finished", + ) + seconds: Union[int, str] = SchemaField( + description="Duration in seconds", default=0 + ) + minutes: Union[int, str] = SchemaField( + description="Duration in minutes", default=0 + ) + hours: Union[int, str] = 
SchemaField(description="Duration in hours", default=0) + days: Union[int, str] = SchemaField(description="Duration in days", default=0) class Output(BlockSchema): - output_message: str + output_message: str = SchemaField( + description="Message after the timer finishes" + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/youtube.py b/autogpt_platform/backend/backend/blocks/youtube.py index cec50109bd4d..b4f0259d98b1 100644 --- a/autogpt_platform/backend/backend/blocks/youtube.py +++ b/autogpt_platform/backend/backend/blocks/youtube.py @@ -7,9 +7,10 @@ from backend.data.model import SchemaField -class TranscribeYouTubeVideoBlock(Block): +class TranscribeYoutubeVideoBlock(Block): class Input(BlockSchema): youtube_url: str = SchemaField( + title="YouTube URL", description="The URL of the YouTube video to transcribe", placeholder="https://www.youtube.com/watch?v=dQw4w9WgXcQ", ) @@ -24,8 +25,8 @@ class Output(BlockSchema): def __init__(self): super().__init__( id="f3a8f7e1-4b1d-4e5f-9f2a-7c3d5a2e6b4c", - input_schema=TranscribeYouTubeVideoBlock.Input, - output_schema=TranscribeYouTubeVideoBlock.Output, + input_schema=TranscribeYoutubeVideoBlock.Input, + output_schema=TranscribeYoutubeVideoBlock.Output, description="Transcribes a YouTube video.", categories={BlockCategory.SOCIAL}, test_input={"youtube_url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"}, diff --git a/autogpt_platform/backend/test/util/test_service.py b/autogpt_platform/backend/test/util/test_service.py index 458e18b76d3a..e03063fff32d 100644 --- a/autogpt_platform/backend/test/util/test_service.py +++ b/autogpt_platform/backend/test/util/test_service.py @@ -5,7 +5,7 @@ TEST_SERVICE_PORT = 8765 -class TestService(AppService): +class ServiceTest(AppService): def __init__(self): super().__init__(port=TEST_SERVICE_PORT) @@ -27,8 +27,8 @@ async def add_async(a: int, b: int) -> int: @pytest.mark.asyncio(scope="session") async def test_service_creation(server): - with TestService(): - client = get_service_client(TestService, TEST_SERVICE_PORT) + with ServiceTest(): + client = get_service_client(ServiceTest, TEST_SERVICE_PORT) assert client.add(5, 3) == 8 assert client.subtract(10, 4) == 6 assert client.fun_with_async(5, 3) == 8 From 6f3828fc994a8441ac4a860fb050def2da18042e Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Fri, 18 Oct 2024 13:49:56 +0100 Subject: [PATCH 05/24] fix(dockercompose): Fix db manager connection (#8377) * add db host * remove unused variable --- autogpt_platform/backend/backend/util/settings.py | 5 ----- autogpt_platform/docker-compose.platform.yml | 1 + 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index 6e552fadb83d..1ac875fdf92a 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -117,11 +117,6 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): description="The port for agent server daemon to run on", ) - database_api_host: str = Field( - default="0.0.0.0", - description="The host for database server API to run on", - ) - database_api_port: int = Field( default=8005, description="The port for database server API to run on", diff --git a/autogpt_platform/docker-compose.platform.yml b/autogpt_platform/docker-compose.platform.yml index a0b8f670acd6..020f32201fdc 100644 --- a/autogpt_platform/docker-compose.platform.yml +++ 
b/autogpt_platform/docker-compose.platform.yml @@ -103,6 +103,7 @@ services: - ENABLE_AUTH=true - PYRO_HOST=0.0.0.0 - AGENTSERVER_HOST=rest_server + - DATABASEMANAGER_HOST=0.0.0.0 ports: - "8002:8000" networks: From 30376a8ec87600e9d73ad2f3bbbc6a88faaba962 Mon Sep 17 00:00:00 2001 From: Emmanuel Ferdman Date: Sat, 19 Oct 2024 18:52:41 +0300 Subject: [PATCH 06/24] fix: update types reference (#8366) Signed-off-by: Emmanuel Ferdman --- docs/content/server/new_blocks.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/content/server/new_blocks.md b/docs/content/server/new_blocks.md index 8cc5a5253836..1c7c3684cf7f 100644 --- a/docs/content/server/new_blocks.md +++ b/docs/content/server/new_blocks.md @@ -196,7 +196,7 @@ class BlockWithAPIKeyAndOAuth(Block): The credentials will be automagically injected by the executor in the back end. -The `APIKeyCredentials` and `OAuth2Credentials` models are defined [here](https://github.com/Significant-Gravitas/AutoGPT/blob/master/rnd/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py). +The `APIKeyCredentials` and `OAuth2Credentials` models are defined [here](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py). To use them in e.g. an API request, you can either access the token directly: ```python From f19ed9f652cafbf9ad58e1ef7a4b1ff8961e6417 Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Mon, 21 Oct 2024 10:39:59 +0100 Subject: [PATCH 07/24] fix spelling mistake (#8380) --- autogpt_platform/infra/helm/autogpt-server/values.dev.yaml | 2 +- autogpt_platform/infra/helm/autogpt-server/values.prod.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/autogpt_platform/infra/helm/autogpt-server/values.dev.yaml b/autogpt_platform/infra/helm/autogpt-server/values.dev.yaml index 19349017aef4..6e86991623ec 100644 --- a/autogpt_platform/infra/helm/autogpt-server/values.dev.yaml +++ b/autogpt_platform/infra/helm/autogpt-server/values.dev.yaml @@ -58,7 +58,7 @@ resources: livenessProbe: httpGet: - path: /heath + path: /health port: 8006 initialDelaySeconds: 30 periodSeconds: 10 diff --git a/autogpt_platform/infra/helm/autogpt-server/values.prod.yaml b/autogpt_platform/infra/helm/autogpt-server/values.prod.yaml index 0e73afa33175..a1f979bf6f6c 100644 --- a/autogpt_platform/infra/helm/autogpt-server/values.prod.yaml +++ b/autogpt_platform/infra/helm/autogpt-server/values.prod.yaml @@ -80,7 +80,7 @@ livenessProbe: failureThreshold: 6 readinessProbe: httpGet: - path: /heath + path: /health port: 8006 initialDelaySeconds: 30 periodSeconds: 10 From e4a9c8216f5ad616ff079494912f125cc90458c0 Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Mon, 21 Oct 2024 10:55:16 +0100 Subject: [PATCH 08/24] fix(platform/infra): Fix liveness probe (#8382) fix liveness probe --- autogpt_platform/infra/helm/autogpt-server/values.prod.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt_platform/infra/helm/autogpt-server/values.prod.yaml b/autogpt_platform/infra/helm/autogpt-server/values.prod.yaml index a1f979bf6f6c..eb314a899d1b 100644 --- a/autogpt_platform/infra/helm/autogpt-server/values.prod.yaml +++ b/autogpt_platform/infra/helm/autogpt-server/values.prod.yaml @@ -72,7 +72,7 @@ cors: livenessProbe: httpGet: - path: /heath + path: /health port: 8006 initialDelaySeconds: 30 periodSeconds: 10 From 
9f4be6f0c9dbfca2d39371bffad498bd1fbffe07 Mon Sep 17 00:00:00 2001 From: Bently Date: Mon, 21 Oct 2024 13:18:18 +0100 Subject: [PATCH 09/24] feat(blocks): fix output divider bar being in the wrong place (#8385) --- autogpt_platform/frontend/src/components/NodeOutputs.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt_platform/frontend/src/components/NodeOutputs.tsx b/autogpt_platform/frontend/src/components/NodeOutputs.tsx index c7b7dc57100a..fcedf1f408ce 100644 --- a/autogpt_platform/frontend/src/components/NodeOutputs.tsx +++ b/autogpt_platform/frontend/src/components/NodeOutputs.tsx @@ -23,7 +23,6 @@ export default function NodeOutputs({ Pin: {beautifyString(pin)} -
Data:
@@ -37,6 +36,7 @@ export default function NodeOutputs({ ))}
+
))} From 2437f58849fed550f0b407453f79cda4d324a932 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Mon, 21 Oct 2024 08:05:07 -0500 Subject: [PATCH 10/24] ci: create dependabot config (#8372) * ci: create dependabot * ci: target the dev branch for dependabot * ci: group prs * ci: group updates --------- Co-authored-by: Aarushi <50577581+aarushik93@users.noreply.github.com> --- .github/dependabot.yml | 179 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 179 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000000..68b6fc2b7c27 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,179 @@ +version: 2 +updates: + # autogpt_libs (Poetry project) + - package-ecosystem: "pip" + directory: "autogpt_platform/autogpt_libs" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + # backend (Poetry project) + - package-ecosystem: "pip" + directory: "autogpt_platform/backend" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + + # frontend (Next.js project) + - package-ecosystem: "npm" + directory: "autogpt_platform/frontend" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + + # infra (Terraform) + - package-ecosystem: "terraform" + directory: "autogpt_platform/infra" + schedule: + interval: "weekly" + open-pull-requests-limit: 5 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + + # market (Poetry project) + - package-ecosystem: "pip" + directory: "autogpt_platform/market" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + + # GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + open-pull-requests-limit: 5 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + + # Docker + - package-ecosystem: "docker" + directory: "autogpt_platform/" + schedule: + interval: "weekly" + open-pull-requests-limit: 5 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + + # Submodules + - package-ecosystem: "gitsubmodule" + directory: 
"autogpt_platform/supabase" + schedule: + interval: "weekly" + open-pull-requests-limit: 1 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + + # Docs + - package-ecosystem: 'pip' + directory: "docs/" + schedule: + interval: "weekly" + open-pull-requests-limit: 1 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" From 5e386fda337e389d1f9dee38731edd216672d72e Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Tue, 22 Oct 2024 11:46:26 +0100 Subject: [PATCH 11/24] fix(platform/dockercompose): Put removed services back in (#8386) put removed services back in --- autogpt_platform/docker-compose.yml | 31 +++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/autogpt_platform/docker-compose.yml b/autogpt_platform/docker-compose.yml index dcde6567f178..e917d670bb6e 100644 --- a/autogpt_platform/docker-compose.yml +++ b/autogpt_platform/docker-compose.yml @@ -96,6 +96,37 @@ services: file: ./supabase/docker/docker-compose.yml service: rest + + realtime: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: realtime + + storage: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: storage + + imgproxy: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: imgproxy + + meta: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: meta + + functions: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: functions + analytics: <<: *supabase-services extends: From 1622a4aaa834388a2aa833eec01821da80107d7f Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Tue, 22 Oct 2024 07:33:54 -0500 Subject: [PATCH 12/24] refactor(backend): Move credentials storage to prisma user (#8283) * feat(frontend,backend): testing * feat: testing * feat(backend): it works for reading email * feat(backend): more docs on google * fix(frontend,backend): formatting * feat(backend): more logigin (i know this should be debug) * feat(backend): make real the default scopes * feat(backend): tests and linting * fix: code review prep * feat: sheets block * feat: liniting * Update route.ts * Update autogpt_platform/backend/backend/integrations/oauth/google.py Co-authored-by: Reinier van der Leer * Update autogpt_platform/backend/backend/server/routers/integrations.py Co-authored-by: Reinier van der Leer * fix: revert opener change * feat(frontend): add back opener required to work on mac edge * feat(frontend): drop typing list import from gmail * fix: code review comments * feat: code review changes * feat: code review changes * fix(backend): move from asserts to checks so they don't get optimized away in the future * fix(backend): code review changes * fix(backend): remove google specific check * fix: add typing * fix: only enable google blocks when oauth is configured for google * fix: errors are real and valid outputs always when output * fix(backend): add provider detail for debuging scope declines * Update autogpt_platform/frontend/src/components/integrations/credentials-input.tsx Co-authored-by: Reinier van der Leer * 
fix(frontend): enhance with comment, typeof error isn't known so this is best way to ensure the stringifyication will work * feat: code review change requests * fix: linting * fix: reduce error catching * fix: doc messages in code * fix: check the correct scopes object :smile: * fix: remove double (and not needed) try catch * fix: lint * fix: scopes * feat: handle the default scopes better * feat: better email objectification * feat: process attachements turns out an email doesn't need a body * fix: lint * Update google.py * Update autogpt_platform/backend/backend/data/block.py Co-authored-by: Reinier van der Leer * fix: quit trying and except failure * Update autogpt_platform/backend/backend/server/routers/integrations.py Co-authored-by: Reinier van der Leer * feat: don't allow expired states * fix: clarify function name and purpose * feat: code links updates * feat: additional docs on adding a block * fix: type hint missing which means the block won't work * fix: linting * fix: docs formatting * Update issues.py * fix: improve the naming * fix: formatting * Update new_blocks.md * Update new_blocks.md * feat: better docs on what the args mean * feat: more details on yield * Update new_blocks.md * fix: remove ignore from docs build * feat: initial migration * feat: migration tested with supabase-> prisma data location * add custom migrations and script * update migration command * formatting and linting * updated migration script * add direct db url * add find files * rename * use binary instead of source * temp adding supabase * remove unused functions * adding missed merge * fix: commit hash for lock * ci: fix lint * fix: minor bugs that prevented connecting and migrating to dbs and auth * fix: linting * fix: missed await * fix(backend): phase one pr updates * fix: handle error with returning user object from database_manager * fix: linting * Address comments * Make the migration safe * Update migration doc * Move misplaced model functions * Grammar * Revert lock * Remove irrelevant changes * Remove irrelevant changes * Avoid adding trigger on public schema --------- Co-authored-by: Reinier van der Leer Co-authored-by: Zamil Majdy Co-authored-by: Aarushi Co-authored-by: Aarushi <50577581+aarushik93@users.noreply.github.com> --- .../store.py | 54 ++++++++----------- .../types.py | 7 +-- autogpt_platform/backend/Dockerfile | 2 +- autogpt_platform/backend/README.advanced.md | 4 +- autogpt_platform/backend/README.md | 2 +- autogpt_platform/backend/backend/data/user.py | 20 +++++++ .../backend/backend/executor/database.py | 5 ++ .../backend/backend/executor/manager.py | 9 ++-- .../backend/integrations/creds_manager.py | 11 ++-- .../backend/server/integrations/router.py | 10 ++-- .../backend/backend/server/rest_api.py | 3 +- .../backend/backend/util/cache.py | 28 ++++++---- .../migration.sql | 2 + .../migration.sql | 27 ++++++++++ autogpt_platform/backend/schema.prisma | 1 + autogpt_platform/docker-compose.platform.yml | 2 +- autogpt_platform/docker-compose.yml | 1 - 17 files changed, 123 insertions(+), 65 deletions(-) create mode 100644 autogpt_platform/backend/migrations/20241007175111_move_oauth_creds_to_user_obj/migration.sql create mode 100644 autogpt_platform/backend/migrations/20241007175112_add_oauth_creds_user_trigger/migration.sql diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py index 44cc9b60f4a1..f4ce921937e7 100644 --- 
a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py +++ b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py @@ -1,10 +1,10 @@ import secrets from datetime import datetime, timedelta, timezone -from typing import TYPE_CHECKING, cast +from typing import TYPE_CHECKING if TYPE_CHECKING: from redis import Redis - from supabase import Client + from backend.executor.database import DatabaseManager from autogpt_libs.utils.synchronize import RedisKeyedMutex @@ -18,8 +18,8 @@ class SupabaseIntegrationCredentialsStore: - def __init__(self, supabase: "Client", redis: "Redis"): - self.supabase = supabase + def __init__(self, redis: "Redis", db: "DatabaseManager"): + self.db_manager: DatabaseManager = db self.locks = RedisKeyedMutex(redis) def add_creds(self, user_id: str, credentials: Credentials) -> None: @@ -35,7 +35,9 @@ def add_creds(self, user_id: str, credentials: Credentials) -> None: def get_all_creds(self, user_id: str) -> list[Credentials]: user_metadata = self._get_user_metadata(user_id) - return UserMetadata.model_validate(user_metadata).integration_credentials + return UserMetadata.model_validate( + user_metadata.model_dump() + ).integration_credentials def get_creds_by_id(self, user_id: str, credentials_id: str) -> Credentials | None: all_credentials = self.get_all_creds(user_id) @@ -90,9 +92,7 @@ def delete_creds_by_id(self, user_id: str, credentials_id: str) -> None: ] self._set_user_integration_creds(user_id, filtered_credentials) - async def store_state_token( - self, user_id: str, provider: str, scopes: list[str] - ) -> str: + def store_state_token(self, user_id: str, provider: str, scopes: list[str]) -> str: token = secrets.token_urlsafe(32) expires_at = datetime.now(timezone.utc) + timedelta(minutes=10) @@ -105,17 +105,17 @@ async def store_state_token( with self.locked_user_metadata(user_id): user_metadata = self._get_user_metadata(user_id) - oauth_states = user_metadata.get("integration_oauth_states", []) + oauth_states = user_metadata.integration_oauth_states oauth_states.append(state.model_dump()) - user_metadata["integration_oauth_states"] = oauth_states + user_metadata.integration_oauth_states = oauth_states - self.supabase.auth.admin.update_user_by_id( - user_id, {"user_metadata": user_metadata} + self.db_manager.update_user_metadata( + user_id=user_id, metadata=user_metadata ) return token - async def get_any_valid_scopes_from_state_token( + def get_any_valid_scopes_from_state_token( self, user_id: str, token: str, provider: str ) -> list[str]: """ @@ -126,7 +126,7 @@ async def get_any_valid_scopes_from_state_token( THE CODE FOR TOKENS. 
""" user_metadata = self._get_user_metadata(user_id) - oauth_states = user_metadata.get("integration_oauth_states", []) + oauth_states = user_metadata.integration_oauth_states now = datetime.now(timezone.utc) valid_state = next( @@ -145,10 +145,10 @@ async def get_any_valid_scopes_from_state_token( return [] - async def verify_state_token(self, user_id: str, token: str, provider: str) -> bool: + def verify_state_token(self, user_id: str, token: str, provider: str) -> bool: with self.locked_user_metadata(user_id): user_metadata = self._get_user_metadata(user_id) - oauth_states = user_metadata.get("integration_oauth_states", []) + oauth_states = user_metadata.integration_oauth_states now = datetime.now(timezone.utc) valid_state = next( @@ -165,10 +165,8 @@ async def verify_state_token(self, user_id: str, token: str, provider: str) -> b if valid_state: # Remove the used state oauth_states.remove(valid_state) - user_metadata["integration_oauth_states"] = oauth_states - self.supabase.auth.admin.update_user_by_id( - user_id, {"user_metadata": user_metadata} - ) + user_metadata.integration_oauth_states = oauth_states + self.db_manager.update_user_metadata(user_id, user_metadata) return True return False @@ -177,19 +175,13 @@ def _set_user_integration_creds( self, user_id: str, credentials: list[Credentials] ) -> None: raw_metadata = self._get_user_metadata(user_id) - raw_metadata.update( - {"integration_credentials": [c.model_dump() for c in credentials]} - ) - self.supabase.auth.admin.update_user_by_id( - user_id, {"user_metadata": raw_metadata} - ) + raw_metadata.integration_credentials = [c.model_dump() for c in credentials] + self.db_manager.update_user_metadata(user_id, raw_metadata) def _get_user_metadata(self, user_id: str) -> UserMetadataRaw: - response = self.supabase.auth.admin.get_user_by_id(user_id) - if not response.user: - raise ValueError(f"User with ID {user_id} not found") - return cast(UserMetadataRaw, response.user.user_metadata) + metadata: UserMetadataRaw = self.db_manager.get_user_metadata(user_id=user_id) + return metadata def locked_user_metadata(self, user_id: str): - key = (self.supabase.supabase_url, f"user:{user_id}", "metadata") + key = (self.db_manager, f"user:{user_id}", "metadata") return self.locks.locked(key) diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py index da39f6a842c2..0f973bb52484 100644 --- a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py +++ b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py @@ -56,6 +56,7 @@ class OAuthState(BaseModel): token: str provider: str expires_at: int + scopes: list[str] """Unix timestamp (seconds) indicating when this OAuth state expires""" @@ -64,6 +65,6 @@ class UserMetadata(BaseModel): integration_oauth_states: list[OAuthState] = Field(default_factory=list) -class UserMetadataRaw(TypedDict, total=False): - integration_credentials: list[dict] - integration_oauth_states: list[dict] +class UserMetadataRaw(BaseModel): + integration_credentials: list[dict] = Field(default_factory=list) + integration_oauth_states: list[dict] = Field(default_factory=list) diff --git a/autogpt_platform/backend/Dockerfile b/autogpt_platform/backend/Dockerfile index f697db11982c..5795398d1fae 100644 --- a/autogpt_platform/backend/Dockerfile +++ b/autogpt_platform/backend/Dockerfile @@ -8,7 +8,7 @@ WORKDIR /app # 
Install build dependencies RUN apt-get update \ - && apt-get install -y build-essential curl ffmpeg wget libcurl4-gnutls-dev libexpat1-dev gettext libz-dev libssl-dev postgresql-client git \ + && apt-get install -y build-essential curl ffmpeg wget libcurl4-gnutls-dev libexpat1-dev libpq5 gettext libz-dev libssl-dev postgresql-client git \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* diff --git a/autogpt_platform/backend/README.advanced.md b/autogpt_platform/backend/README.advanced.md index 829a3d79262a..09e0f90fcc25 100644 --- a/autogpt_platform/backend/README.advanced.md +++ b/autogpt_platform/backend/README.advanced.md @@ -37,7 +37,7 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes 5. Generate the Prisma client ```sh - poetry run prisma generate --schema postgres/schema.prisma + poetry run prisma generate ``` @@ -61,7 +61,7 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes ```sh cd ../backend - prisma migrate dev --schema postgres/schema.prisma + prisma migrate deploy ``` ## Running The Server diff --git a/autogpt_platform/backend/README.md b/autogpt_platform/backend/README.md index fc0c6b3944d0..00194b304e1c 100644 --- a/autogpt_platform/backend/README.md +++ b/autogpt_platform/backend/README.md @@ -59,7 +59,7 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes ```sh docker compose up db redis -d - poetry run prisma migrate dev + poetry run prisma migrate deploy ``` ## Running The Server diff --git a/autogpt_platform/backend/backend/data/user.py b/autogpt_platform/backend/backend/data/user.py index db60eea235cb..6c8696379e37 100644 --- a/autogpt_platform/backend/backend/data/user.py +++ b/autogpt_platform/backend/backend/data/user.py @@ -1,6 +1,8 @@ from typing import Optional +from autogpt_libs.supabase_integration_credentials_store.types import UserMetadataRaw from fastapi import HTTPException +from prisma import Json from prisma.models import User from backend.data.db import prisma @@ -48,3 +50,21 @@ async def create_default_user(enable_auth: str) -> Optional[User]: ) return User.model_validate(user) return None + + +async def get_user_metadata(user_id: str) -> UserMetadataRaw: + user = await User.prisma().find_unique_or_raise( + where={"id": user_id}, + ) + return ( + UserMetadataRaw.model_validate(user.metadata) + if user.metadata + else UserMetadataRaw() + ) + + +async def update_user_metadata(user_id: str, metadata: UserMetadataRaw): + await User.prisma().update( + where={"id": user_id}, + data={"metadata": Json(metadata.model_dump())}, + ) diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index 8257d5c8c07c..aea2dd4177fc 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ b/autogpt_platform/backend/backend/executor/database.py @@ -16,6 +16,7 @@ ) from backend.data.graph import get_graph, get_node from backend.data.queue import RedisEventQueue +from backend.data.user import get_user_metadata, update_user_metadata from backend.util.service import AppService, expose from backend.util.settings import Config @@ -73,3 +74,7 @@ def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R: Callable[[Any, str, int, str, dict[str, str], float, float], int], exposed_run_and_wait(user_credit_model.spend_credits), ) + + # User + User Metadata + get_user_metadata = exposed_run_and_wait(get_user_metadata) + update_user_metadata = exposed_run_and_wait(update_user_metadata) diff --git 
a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index d756dfc6ecf8..4e6cfe9e3bbb 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -31,7 +31,7 @@ from backend.data.model import CREDENTIALS_FIELD_NAME, CredentialsMetaInput from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.util import json -from backend.util.cache import thread_cached_property +from backend.util.cache import thread_cached from backend.util.decorator import error_logged, time_measured from backend.util.logging import configure_logging from backend.util.process import set_service_name @@ -417,7 +417,7 @@ def on_node_executor_start(cls): redis.connect() cls.pid = os.getpid() cls.db_client = get_db_client() - cls.creds_manager = IntegrationCredentialsManager() + cls.creds_manager = IntegrationCredentialsManager(db_manager=cls.db_client) # Set up shutdown handlers cls.shutdown_lock = threading.Lock() @@ -670,7 +670,7 @@ def run_service(self): ) self.credentials_store = SupabaseIntegrationCredentialsStore( - self.supabase, redis.get_redis() + redis=redis.get_redis(), db=self.db_client ) self.executor = ProcessPoolExecutor( max_workers=self.pool_size, @@ -701,7 +701,7 @@ def cleanup(self): super().cleanup() - @thread_cached_property + @property def db_client(self) -> "DatabaseManager": return get_db_client() @@ -857,6 +857,7 @@ def _validate_node_input_credentials(self, graph: Graph, user_id: str): # ------- UTILITIES ------- # +@thread_cached def get_db_client() -> "DatabaseManager": from backend.executor import DatabaseManager diff --git a/autogpt_platform/backend/backend/integrations/creds_manager.py b/autogpt_platform/backend/backend/integrations/creds_manager.py index 6fcee8eecfee..3bfde70e2817 100644 --- a/autogpt_platform/backend/backend/integrations/creds_manager.py +++ b/autogpt_platform/backend/backend/integrations/creds_manager.py @@ -10,11 +10,10 @@ from redis.lock import Lock as RedisLock from backend.data import redis +from backend.executor.database import DatabaseManager from backend.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler from backend.util.settings import Settings -from ..server.integrations.utils import get_supabase - logger = logging.getLogger(__name__) settings = Settings() @@ -51,10 +50,12 @@ class IntegrationCredentialsManager: cause so much latency that it's worth implementing. 
""" - def __init__(self): + def __init__(self, db_manager: DatabaseManager): redis_conn = redis.get_redis() self._locks = RedisKeyedMutex(redis_conn) - self.store = SupabaseIntegrationCredentialsStore(get_supabase(), redis_conn) + self.store = SupabaseIntegrationCredentialsStore( + redis=redis_conn, db=db_manager + ) def create(self, user_id: str, credentials: Credentials) -> None: return self.store.add_creds(user_id, credentials) @@ -131,7 +132,7 @@ def delete(self, user_id: str, credentials_id: str) -> None: def _acquire_lock(self, user_id: str, credentials_id: str, *args: str) -> RedisLock: key = ( - self.store.supabase.supabase_url, + self.store.db_manager, f"user:{user_id}", f"credentials:{credentials_id}", *args, diff --git a/autogpt_platform/backend/backend/server/integrations/router.py b/autogpt_platform/backend/backend/server/integrations/router.py index a44954b37157..440ce7921cbe 100644 --- a/autogpt_platform/backend/backend/server/integrations/router.py +++ b/autogpt_platform/backend/backend/server/integrations/router.py @@ -10,6 +10,7 @@ from fastapi import APIRouter, Body, Depends, HTTPException, Path, Query, Request from pydantic import BaseModel, Field, SecretStr +from backend.executor.manager import get_db_client from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler from backend.util.settings import Settings @@ -19,7 +20,8 @@ logger = logging.getLogger(__name__) settings = Settings() router = APIRouter() -creds_manager = IntegrationCredentialsManager() + +creds_manager = IntegrationCredentialsManager(db_manager=get_db_client()) class LoginResponse(BaseModel): @@ -41,7 +43,7 @@ async def login( requested_scopes = scopes.split(",") if scopes else [] # Generate and store a secure random state token along with the scopes - state_token = await creds_manager.store.store_state_token( + state_token = creds_manager.store.store_state_token( user_id, provider, requested_scopes ) @@ -70,12 +72,12 @@ async def callback( handler = _get_provider_oauth_handler(request, provider) # Verify the state token - if not await creds_manager.store.verify_state_token(user_id, state_token, provider): + if not creds_manager.store.verify_state_token(user_id, state_token, provider): logger.warning(f"Invalid or expired state token for user {user_id}") raise HTTPException(status_code=400, detail="Invalid or expired state token") try: - scopes = await creds_manager.store.get_any_valid_scopes_from_state_token( + scopes = creds_manager.store.get_any_valid_scopes_from_state_token( user_id, state_token, provider ) logger.debug(f"Retrieved scopes from state token: {scopes}") diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py index 880d41817f21..7cb6988cfd9f 100644 --- a/autogpt_platform/backend/backend/server/rest_api.py +++ b/autogpt_platform/backend/backend/server/rest_api.py @@ -19,6 +19,7 @@ from backend.data.credit import get_block_costs, get_user_credit_model from backend.data.user import get_or_create_user from backend.executor import ExecutionManager, ExecutionScheduler +from backend.executor.manager import get_db_client from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.server.model import CreateGraph, SetGraphActiveVersion from backend.util.cache import thread_cached_property @@ -97,7 +98,7 @@ def run_service(self): tags=["integrations"], dependencies=[Depends(auth_middleware)], ) - 
self.integration_creds_manager = IntegrationCredentialsManager() + self.integration_creds_manager = IntegrationCredentialsManager(get_db_client()) api_router.include_router( backend.server.routers.analytics.router, diff --git a/autogpt_platform/backend/backend/util/cache.py b/autogpt_platform/backend/backend/util/cache.py index 30048e8781af..b4506dda47b8 100644 --- a/autogpt_platform/backend/backend/util/cache.py +++ b/autogpt_platform/backend/backend/util/cache.py @@ -1,21 +1,27 @@ import threading from functools import wraps -from typing import Callable, TypeVar +from typing import Callable, ParamSpec, TypeVar T = TypeVar("T") +P = ParamSpec("P") R = TypeVar("R") -def thread_cached_property(func: Callable[[T], R]) -> property: - local_cache = threading.local() +def thread_cached(func: Callable[P, R]) -> Callable[P, R]: + thread_local = threading.local() @wraps(func) - def wrapper(self: T) -> R: - if not hasattr(local_cache, "cache"): - local_cache.cache = {} - key = id(self) - if key not in local_cache.cache: - local_cache.cache[key] = func(self) - return local_cache.cache[key] + def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + cache = getattr(thread_local, "cache", None) + if cache is None: + cache = thread_local.cache = {} + key = (args, tuple(sorted(kwargs.items()))) + if key not in cache: + cache[key] = func(*args, **kwargs) + return cache[key] + + return wrapper - return property(wrapper) + +def thread_cached_property(func: Callable[[T], R]) -> property: + return property(thread_cached(func)) diff --git a/autogpt_platform/backend/migrations/20241007175111_move_oauth_creds_to_user_obj/migration.sql b/autogpt_platform/backend/migrations/20241007175111_move_oauth_creds_to_user_obj/migration.sql new file mode 100644 index 000000000000..b3886efa030a --- /dev/null +++ b/autogpt_platform/backend/migrations/20241007175111_move_oauth_creds_to_user_obj/migration.sql @@ -0,0 +1,2 @@ +-- AlterTable +ALTER TABLE "User" ADD COLUMN "metadata" JSONB; diff --git a/autogpt_platform/backend/migrations/20241007175112_add_oauth_creds_user_trigger/migration.sql b/autogpt_platform/backend/migrations/20241007175112_add_oauth_creds_user_trigger/migration.sql new file mode 100644 index 000000000000..aa577c90e938 --- /dev/null +++ b/autogpt_platform/backend/migrations/20241007175112_add_oauth_creds_user_trigger/migration.sql @@ -0,0 +1,27 @@ +--CreateFunction +CREATE OR REPLACE FUNCTION add_user_to_platform() RETURNS TRIGGER AS $$ +BEGIN + INSERT INTO platform."User" (id, email, "updatedAt") + VALUES (NEW.id, NEW.email, now()); + RETURN NEW; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; + +DO $$ +BEGIN + -- Check if the auth schema and users table exist + IF EXISTS ( + SELECT 1 + FROM information_schema.tables + WHERE table_schema = 'auth' + AND table_name = 'users' + ) THEN + -- Drop the trigger if it exists + DROP TRIGGER IF EXISTS user_added_to_platform ON auth.users; + + -- Create the trigger + CREATE TRIGGER user_added_to_platform + AFTER INSERT ON auth.users + FOR EACH ROW EXECUTE FUNCTION add_user_to_platform(); + END IF; +END $$; diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index 3fab8dc2593d..b316e226d202 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -17,6 +17,7 @@ model User { name String? createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + metadata Json? 
// Relations AgentGraphs AgentGraph[] diff --git a/autogpt_platform/docker-compose.platform.yml b/autogpt_platform/docker-compose.platform.yml index 020f32201fdc..ba396c76eac3 100644 --- a/autogpt_platform/docker-compose.platform.yml +++ b/autogpt_platform/docker-compose.platform.yml @@ -8,7 +8,7 @@ services: develop: watch: - path: ./ - target: autogpt_platform/backend/migrate + target: autogpt_platform/backend/migrations action: rebuild depends_on: db: diff --git a/autogpt_platform/docker-compose.yml b/autogpt_platform/docker-compose.yml index e917d670bb6e..be6f1f49ede5 100644 --- a/autogpt_platform/docker-compose.yml +++ b/autogpt_platform/docker-compose.yml @@ -96,7 +96,6 @@ services: file: ./supabase/docker/docker-compose.yml service: rest - realtime: <<: *supabase-services extends: From 30a62f898ca8ab1fe193847620250923519ef9f3 Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Tue, 22 Oct 2024 14:45:30 +0100 Subject: [PATCH 13/24] feat(platform/ci) Set up deploys from dev (#8355) * ci with workload identity * temp update * update name * wip * update auth step * update provider name * remove audience * temp set to false * update registry naming * update context * update login * revert temp updates --- .../workflows/platform-autogpt-deploy.yaml | 152 ++++++++++++++++++ .../helm/autogpt-builder/values.dev.yaml | 4 +- .../infra/helm/autogpt-server/values.dev.yaml | 2 +- .../autogpt-websocket-server/values.dev.yaml | 2 +- .../infra/terraform/environments/dev.tfvars | 48 +++++- autogpt_platform/infra/terraform/main.tf | 1 + .../infra/terraform/modules/iam/main.tf | 27 ++++ .../infra/terraform/modules/iam/outputs.tf | 12 +- .../infra/terraform/modules/iam/variables.tf | 13 ++ autogpt_platform/infra/terraform/variables.tf | 16 ++ 10 files changed, 269 insertions(+), 8 deletions(-) create mode 100644 .github/workflows/platform-autogpt-deploy.yaml diff --git a/.github/workflows/platform-autogpt-deploy.yaml b/.github/workflows/platform-autogpt-deploy.yaml new file mode 100644 index 000000000000..97c2fe78749e --- /dev/null +++ b/.github/workflows/platform-autogpt-deploy.yaml @@ -0,0 +1,152 @@ +name: AutoGPT Platform - Build, Push, and Deploy Dev Environment + +on: + push: + branches: [ dev ] + paths: + - 'autogpt_platform/backend/**' + - 'autogpt_platform/frontend/**' + - 'autogpt_platform/market/**' + +permissions: + contents: 'read' + id-token: 'write' + +env: + PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }} + GKE_CLUSTER: dev-gke-cluster + GKE_ZONE: us-central1-a + NAMESPACE: dev-agpt + +jobs: + build-push-deploy: + name: Build, Push, and Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - id: 'auth' + uses: 'google-github-actions/auth@v1' + with: + workload_identity_provider: 'projects/638488734936/locations/global/workloadIdentityPools/dev-pool/providers/github' + service_account: 'dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com' + token_format: 'access_token' + create_credentials_file: true + + - name: 'Set up Cloud SDK' + uses: 'google-github-actions/setup-gcloud@v1' + + - name: 'Configure Docker' + run: | + gcloud auth configure-docker us-east1-docker.pkg.dev + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + + - name: Check for changes + id: check_changes + run: | + 
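          # Compare the merge target (dev) against this commit to see which
          # components changed; the *_CHANGED flags gate the build and deploy steps below.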
git fetch origin dev + BACKEND_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/backend/" && echo "true" || echo "false") + FRONTEND_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/frontend/" && echo "true" || echo "false") + MARKET_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/market/" && echo "true" || echo "false") + echo "backend_changed=$BACKEND_CHANGED" >> $GITHUB_OUTPUT + echo "frontend_changed=$FRONTEND_CHANGED" >> $GITHUB_OUTPUT + echo "market_changed=$MARKET_CHANGED" >> $GITHUB_OUTPUT + + - name: Get GKE credentials + uses: 'google-github-actions/get-gke-credentials@v1' + with: + cluster_name: ${{ env.GKE_CLUSTER }} + location: ${{ env.GKE_ZONE }} + + - name: Build and Push Backend + if: steps.check_changes.outputs.backend_changed == 'true' + uses: docker/build-push-action@v2 + with: + context: . + file: ./autogpt_platform/backend/Dockerfile + push: true + tags: us-east1-docker.pkg.dev/agpt-dev/agpt-backend-dev/agpt-backend-dev:${{ github.sha }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Build and Push Frontend + if: steps.check_changes.outputs.frontend_changed == 'true' + uses: docker/build-push-action@v2 + with: + context: . + file: ./autogpt_platform/frontend/Dockerfile + push: true + tags: us-east1-docker.pkg.dev/agpt-dev/agpt-frontend-dev/agpt-frontend-dev:${{ github.sha }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Build and Push Market + if: steps.check_changes.outputs.market_changed == 'true' + uses: docker/build-push-action@v2 + with: + context: . + file: ./autogpt_platform/market/Dockerfile + push: true + tags: us-east1-docker.pkg.dev/agpt-dev/agpt-market-dev/agpt-market-dev:${{ github.sha }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Move cache + run: | + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache + + - name: Set up Helm + uses: azure/setup-helm@v1 + with: + version: v3.4.0 + + - name: Deploy Backend + if: steps.check_changes.outputs.backend_changed == 'true' + run: | + helm upgrade autogpt-server ./autogpt-server \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-server/values.yaml \ + -f autogpt-server/values.dev.yaml \ + --set image.tag=${{ github.sha }} + + - name: Deploy Websocket + if: steps.check_changes.outputs.backend_changed == 'true' + run: | + helm upgrade autogpt-websocket-server ./autogpt-websocket-server \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-websocket-server/values.yaml \ + -f autogpt-websocket-server/values.dev.yaml \ + --set image.tag=${{ github.sha }} + + - name: Deploy Market + if: steps.check_changes.outputs.market_changed == 'true' + run: | + helm upgrade autogpt-market ./autogpt-market \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-market/values.yaml \ + -f autogpt-market/values.dev.yaml \ + --set image.tag=${{ github.sha }} + + - name: Deploy Frontend + if: steps.check_changes.outputs.frontend_changed == 'true' + run: | + helm upgrade autogpt-builder ./autogpt-builder \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-builder/values.yaml \ + -f autogpt-builder/values.dev.yaml \ + --set image.tag=${{ github.sha }} \ No newline at end of file diff --git a/autogpt_platform/infra/helm/autogpt-builder/values.dev.yaml b/autogpt_platform/infra/helm/autogpt-builder/values.dev.yaml index 
1821acc24a39..128ea3ee44a5 100644 --- a/autogpt_platform/infra/helm/autogpt-builder/values.dev.yaml +++ b/autogpt_platform/infra/helm/autogpt-builder/values.dev.yaml @@ -1,9 +1,9 @@ # dev values, overwrite base values as needed. image: - repository: us-east1-docker.pkg.dev/agpt-dev/agpt-builder-dev/agpt-builder-dev + repository: us-east1-docker.pkg.dev/agpt-dev/agpt-frontend-dev/agpt-frontend-dev pullPolicy: Always - tag: "fe3d2a9" + tag: "latest" serviceAccount: annotations: diff --git a/autogpt_platform/infra/helm/autogpt-server/values.dev.yaml b/autogpt_platform/infra/helm/autogpt-server/values.dev.yaml index 6e86991623ec..b7488e0fd078 100644 --- a/autogpt_platform/infra/helm/autogpt-server/values.dev.yaml +++ b/autogpt_platform/infra/helm/autogpt-server/values.dev.yaml @@ -1,7 +1,7 @@ # dev values, overwrite base values as needed. image: - repository: us-east1-docker.pkg.dev/agpt-dev/agpt-server-dev/agpt-server-dev + repository: us-east1-docker.pkg.dev/agpt-dev/agpt-backend-dev/agpt-backend-dev pullPolicy: Always tag: "latest" diff --git a/autogpt_platform/infra/helm/autogpt-websocket-server/values.dev.yaml b/autogpt_platform/infra/helm/autogpt-websocket-server/values.dev.yaml index a977f9bece0c..dd26b32f4030 100644 --- a/autogpt_platform/infra/helm/autogpt-websocket-server/values.dev.yaml +++ b/autogpt_platform/infra/helm/autogpt-websocket-server/values.dev.yaml @@ -1,7 +1,7 @@ replicaCount: 1 # not scaling websocket server for now image: - repository: us-east1-docker.pkg.dev/agpt-dev/agpt-server-dev/agpt-server-dev + repository: us-east1-docker.pkg.dev/agpt-dev/agpt-backend-dev/agpt-backend-dev tag: latest pullPolicy: Always diff --git a/autogpt_platform/infra/terraform/environments/dev.tfvars b/autogpt_platform/infra/terraform/environments/dev.tfvars index 1f72c6d907a0..c193e8399a9a 100644 --- a/autogpt_platform/infra/terraform/environments/dev.tfvars +++ b/autogpt_platform/infra/terraform/environments/dev.tfvars @@ -28,6 +28,10 @@ service_accounts = { "dev-agpt-market-sa" = { display_name = "AutoGPT Dev Market Server Account" description = "Service account for agpt dev market server" + }, + "dev-github-actions-sa" = { + display_name = "GitHub Actions Dev Service Account" + description = "Service account for GitHub Actions deployments to dev" } } @@ -51,6 +55,11 @@ workload_identity_bindings = { service_account_name = "dev-agpt-market-sa" namespace = "dev-agpt" ksa_name = "dev-agpt-market-sa" + }, + "dev-github-actions-workload-identity" = { + service_account_name = "dev-github-actions-sa" + namespace = "dev-agpt" + ksa_name = "dev-github-actions-sa" } } @@ -59,7 +68,8 @@ role_bindings = { "serviceAccount:dev-agpt-server-sa@agpt-dev.iam.gserviceaccount.com", "serviceAccount:dev-agpt-builder-sa@agpt-dev.iam.gserviceaccount.com", "serviceAccount:dev-agpt-ws-server-sa@agpt-dev.iam.gserviceaccount.com", - "serviceAccount:dev-agpt-market-sa@agpt-dev.iam.gserviceaccount.com" + "serviceAccount:dev-agpt-market-sa@agpt-dev.iam.gserviceaccount.com", + "serviceAccount:dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com" ], "roles/cloudsql.client" = [ "serviceAccount:dev-agpt-server-sa@agpt-dev.iam.gserviceaccount.com", @@ -80,7 +90,8 @@ role_bindings = { "serviceAccount:dev-agpt-server-sa@agpt-dev.iam.gserviceaccount.com", "serviceAccount:dev-agpt-builder-sa@agpt-dev.iam.gserviceaccount.com", "serviceAccount:dev-agpt-ws-server-sa@agpt-dev.iam.gserviceaccount.com", - "serviceAccount:dev-agpt-market-sa@agpt-dev.iam.gserviceaccount.com" + 
"serviceAccount:dev-agpt-market-sa@agpt-dev.iam.gserviceaccount.com", + "serviceAccount:dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com" ] "roles/compute.networkUser" = [ "serviceAccount:dev-agpt-server-sa@agpt-dev.iam.gserviceaccount.com", @@ -93,6 +104,16 @@ role_bindings = { "serviceAccount:dev-agpt-builder-sa@agpt-dev.iam.gserviceaccount.com", "serviceAccount:dev-agpt-ws-server-sa@agpt-dev.iam.gserviceaccount.com", "serviceAccount:dev-agpt-market-sa@agpt-dev.iam.gserviceaccount.com" + ], + "roles/artifactregistry.writer" = [ + "serviceAccount:dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com" + ], + "roles/container.viewer" = [ + "serviceAccount:dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com" + ], + "roles/iam.serviceAccountTokenCreator" = [ + "principalSet://iam.googleapis.com/projects/638488734936/locations/global/workloadIdentityPools/dev-pool/*", + "serviceAccount:dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com" ] } @@ -101,4 +122,25 @@ services_ip_cidr_range = "10.2.0.0/20" public_bucket_names = ["website-artifacts"] standard_bucket_names = [] -bucket_admins = ["gcp-devops-agpt@agpt.co", "gcp-developers@agpt.co"] \ No newline at end of file +bucket_admins = ["gcp-devops-agpt@agpt.co", "gcp-developers@agpt.co"] + +workload_identity_pools = { + "dev-pool" = { + display_name = "Development Identity Pool" + providers = { + "github" = { + issuer_uri = "https://token.actions.githubusercontent.com" + attribute_mapping = { + "google.subject" = "assertion.sub" + "attribute.repository" = "assertion.repository" + "attribute.repository_owner" = "assertion.repository_owner" + } + } + } + service_accounts = { + "dev-github-actions-sa" = [ + "Significant-Gravitas/AutoGPT" + ] + } + } +} \ No newline at end of file diff --git a/autogpt_platform/infra/terraform/main.tf b/autogpt_platform/infra/terraform/main.tf index 31049ed40cb9..047ebd59c661 100644 --- a/autogpt_platform/infra/terraform/main.tf +++ b/autogpt_platform/infra/terraform/main.tf @@ -61,6 +61,7 @@ module "iam" { service_accounts = var.service_accounts workload_identity_bindings = var.workload_identity_bindings role_bindings = var.role_bindings + workload_identity_pools = var.workload_identity_pools } module "storage" { diff --git a/autogpt_platform/infra/terraform/modules/iam/main.tf b/autogpt_platform/infra/terraform/modules/iam/main.tf index 3a07d6926456..f632f4c42b43 100644 --- a/autogpt_platform/infra/terraform/modules/iam/main.tf +++ b/autogpt_platform/infra/terraform/modules/iam/main.tf @@ -23,4 +23,31 @@ resource "google_project_iam_binding" "role_bindings" { role = each.key members = each.value +} + +resource "google_iam_workload_identity_pool" "pools" { + for_each = var.workload_identity_pools + workload_identity_pool_id = each.key + display_name = each.value.display_name +} + +resource "google_iam_workload_identity_pool_provider" "providers" { + for_each = merge([ + for pool_id, pool in var.workload_identity_pools : { + for provider_id, provider in pool.providers : + "${pool_id}/${provider_id}" => merge(provider, { + pool_id = pool_id + }) + } + ]...) 
+ + workload_identity_pool_id = split("/", each.key)[0] + workload_identity_pool_provider_id = split("/", each.key)[1] + + attribute_mapping = each.value.attribute_mapping + oidc { + issuer_uri = each.value.issuer_uri + allowed_audiences = each.value.allowed_audiences + } + attribute_condition = "assertion.repository_owner==\"Significant-Gravitas\"" } \ No newline at end of file diff --git a/autogpt_platform/infra/terraform/modules/iam/outputs.tf b/autogpt_platform/infra/terraform/modules/iam/outputs.tf index b503414873a9..19354364bebb 100644 --- a/autogpt_platform/infra/terraform/modules/iam/outputs.tf +++ b/autogpt_platform/infra/terraform/modules/iam/outputs.tf @@ -1,4 +1,14 @@ output "service_account_emails" { description = "The emails of the created service accounts" value = { for k, v in google_service_account.service_accounts : k => v.email } -} \ No newline at end of file +} + +output "workload_identity_pools" { + value = google_iam_workload_identity_pool.pools +} + +output "workload_identity_providers" { + value = { + for k, v in google_iam_workload_identity_pool_provider.providers : k => v.name + } +} diff --git a/autogpt_platform/infra/terraform/modules/iam/variables.tf b/autogpt_platform/infra/terraform/modules/iam/variables.tf index 61637a718783..c9563ea0c7b8 100644 --- a/autogpt_platform/infra/terraform/modules/iam/variables.tf +++ b/autogpt_platform/infra/terraform/modules/iam/variables.tf @@ -26,4 +26,17 @@ variable "role_bindings" { description = "Map of roles to list of members" type = map(list(string)) default = {} +} + +variable "workload_identity_pools" { + type = map(object({ + display_name = string + providers = map(object({ + issuer_uri = string + attribute_mapping = map(string) + allowed_audiences = optional(list(string)) + })) + service_accounts = map(list(string)) # Map of SA to list of allowed principals + })) + default = {} } \ No newline at end of file diff --git a/autogpt_platform/infra/terraform/variables.tf b/autogpt_platform/infra/terraform/variables.tf index 1afd9509fcbc..3b4eb92dff9c 100644 --- a/autogpt_platform/infra/terraform/variables.tf +++ b/autogpt_platform/infra/terraform/variables.tf @@ -130,3 +130,19 @@ variable "bucket_admins" { default = ["gcp-devops-agpt@agpt.co", "gcp-developers@agpt.co"] } +variable "workload_identity_pools" { + type = map(object({ + display_name = string + providers = map(object({ + issuer_uri = string + attribute_mapping = map(string) + allowed_audiences = optional(list(string)) + })) + service_accounts = map(list(string)) + })) + default = {} + description = "Configuration for workload identity pools and their providers" +} + + + From 81febb6589cea7fce97861120d7d0ca12d525fc8 Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Tue, 22 Oct 2024 16:51:23 +0100 Subject: [PATCH 14/24] feat(platform): Added latest claude version (#8397) latest sonnet model --- autogpt_platform/backend/backend/blocks/llm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index daec5b55d392..b87118d3f4f7 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -62,7 +62,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta): GPT4_TURBO = "gpt-4-turbo" GPT3_5_TURBO = "gpt-3.5-turbo" # Anthropic models - CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20240620" + CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest" CLAUDE_3_HAIKU = 
"claude-3-haiku-20240307" # Groq models LLAMA3_8B = "llama3-8b-8192" From 0c517216dffc7db3bee6325f4ccffbf311b4cd00 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Tue, 22 Oct 2024 21:39:54 +0100 Subject: [PATCH 15/24] fix(frontend): Fix broken terms of use link (#8279) Co-authored-by: Aarushi <50577581+aarushik93@users.noreply.github.com> Co-authored-by: Nicholas Tindle --- .github/workflows/platform-frontend-ci.yml | 17 +++++++++++++++++ .../src/app/marketplace/submit/page.tsx | 11 +++++++---- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index 4262cd6484ad..ce3633013bb9 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -39,10 +39,27 @@ jobs: runs-on: ubuntu-latest steps: + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + # this might remove tools that are actually needed, + # if set to "true" but frees about 6 GB + tool-cache: false + + # all of these default to true, but feel free to set to + # "false" if necessary for your workflow + android: false + dotnet: false + haskell: false + large-packages: true + docker-images: true + swap-storage: true + - name: Checkout repository uses: actions/checkout@v4 with: submodules: recursive + - name: Set up Node.js uses: actions/setup-node@v4 with: diff --git a/autogpt_platform/frontend/src/app/marketplace/submit/page.tsx b/autogpt_platform/frontend/src/app/marketplace/submit/page.tsx index 63ee9cfd2f0b..24863130a827 100644 --- a/autogpt_platform/frontend/src/app/marketplace/submit/page.tsx +++ b/autogpt_platform/frontend/src/app/marketplace/submit/page.tsx @@ -144,7 +144,7 @@ const SubmitPage: React.FC = () => { setSubmitError(null); if (!data.agreeToTerms) { - throw new Error("You must agree to the terms of service"); + throw new Error("You must agree to the terms of use"); } try { @@ -404,7 +404,7 @@ const SubmitPage: React.FC = () => { (
{
                    className="text-sm font-medium leading-none peer-disabled:cursor-not-allowed peer-disabled:opacity-70"
                  >
                    I agree to the{" "}
-                    <a …>
-                      terms of service
+                    <a …>
+                      terms of use
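
A note on the `thread_cached` helper that the credentials-storage patch above added to `backend/util/cache.py`: it memoizes one result per thread per argument tuple, which is why the executor can call the module-level `get_db_client()` freely without rebuilding the `DatabaseManager` connection. A minimal runnable sketch of that behaviour — the decorator body is copied from the patch, while `get_client` and its URL argument are hypothetical stand-ins:

    # Editorial sketch, not part of any patch in this series.
    import threading
    from functools import wraps
    from typing import Callable, ParamSpec, TypeVar

    P = ParamSpec("P")
    R = TypeVar("R")

    def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
        thread_local = threading.local()

        @wraps(func)
        def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            # Each thread sees its own cache dict on the threading.local object.
            cache = getattr(thread_local, "cache", None)
            if cache is None:
                cache = thread_local.cache = {}
            key = (args, tuple(sorted(kwargs.items())))
            if key not in cache:
                cache[key] = func(*args, **kwargs)
            return cache[key]

        return wrapper

    calls = 0

    @thread_cached
    def get_client(url: str) -> str:  # hypothetical stand-in for the DB client factory
        global calls
        calls += 1
        return f"client-{url}@thread-{threading.get_ident()}"

    get_client("db://dev")
    get_client("db://dev")
    assert calls == 1  # second call in the same thread is served from the cache

    t = threading.Thread(target=get_client, args=("db://dev",))
    t.start()
    t.join()
    assert calls == 2  # a new thread gets a fresh cache and rebuilds the client

Keying the cache on the call arguments rather than on `self` is also what let the same patch reduce `thread_cached_property` to `property(thread_cached(func))`.
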
From f4dac223353bc052a31f0641c6f74c5805ff78e0 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Wed, 23 Oct 2024 02:13:27 +0300 Subject: [PATCH 16/24] feat(backend): Track LLM token usage + LLM blocks cleanup (#8367) --- .../backend/backend/blocks/__init__.py | 24 +- .../backend/backend/blocks/llm.py | 257 ++++++++++-------- .../backend/backend/data/block.py | 33 ++- .../backend/backend/data/graph.py | 2 +- .../backend/backend/executor/manager.py | 2 + .../backend/backend/server/rest_api.py | 4 +- .../backend/test/block/test_block.py | 6 +- 7 files changed, 195 insertions(+), 133 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/__init__.py b/autogpt_platform/backend/backend/blocks/__init__.py index 940956a20b6f..4fb89e3957ff 100644 --- a/autogpt_platform/backend/backend/blocks/__init__.py +++ b/autogpt_platform/backend/backend/blocks/__init__.py @@ -2,6 +2,7 @@ import os import re from pathlib import Path +from typing import Type, TypeVar from backend.data.block import Block @@ -24,28 +25,31 @@ AVAILABLE_MODULES.append(module) # Load all Block instances from the available modules -AVAILABLE_BLOCKS = {} +AVAILABLE_BLOCKS: dict[str, Type[Block]] = {} -def all_subclasses(clz): - subclasses = clz.__subclasses__() +T = TypeVar("T") + + +def all_subclasses(cls: Type[T]) -> list[Type[T]]: + subclasses = cls.__subclasses__() for subclass in subclasses: subclasses += all_subclasses(subclass) return subclasses -for cls in all_subclasses(Block): - name = cls.__name__ +for block_cls in all_subclasses(Block): + name = block_cls.__name__ - if cls.__name__.endswith("Base"): + if block_cls.__name__.endswith("Base"): continue - if not cls.__name__.endswith("Block"): + if not block_cls.__name__.endswith("Block"): raise ValueError( - f"Block class {cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end" + f"Block class {block_cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end" ) - block = cls() + block = block_cls.create() if not isinstance(block.id, str) or len(block.id) != 36: raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID") @@ -87,6 +91,6 @@ def all_subclasses(clz): if block.disabled: continue - AVAILABLE_BLOCKS[block.id] = block + AVAILABLE_BLOCKS[block.id] = block_cls __all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"] diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index b87118d3f4f7..1366429a542d 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -122,6 +122,17 @@ def cost_factor(self) -> int: raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}") +class MessageRole(str, Enum): + SYSTEM = "system" + USER = "user" + ASSISTANT = "assistant" + + +class Message(BlockSchema): + role: MessageRole + content: str + + class AIStructuredResponseGeneratorBlock(Block): class Input(BlockSchema): prompt: str = SchemaField( @@ -144,6 +155,10 @@ class Input(BlockSchema): default="", description="The system prompt to provide additional context to the model.", ) + conversation_history: list[Message] = SchemaField( + default=[], + description="The conversation history to provide context for the prompt.", + ) retry: int = SchemaField( title="Retry Count", default=3, @@ -152,6 +167,11 @@ class Input(BlockSchema): prompt_values: dict[str, str] = SchemaField( advanced=False, default={}, 
description="Values used to fill in the prompt." ) + max_tokens: int | None = SchemaField( + advanced=True, + default=None, + description="The maximum number of tokens to generate in the chat completion.", + ) class Output(BlockSchema): response: dict[str, Any] = SchemaField( @@ -177,26 +197,47 @@ def __init__(self): }, test_output=("response", {"key1": "key1Value", "key2": "key2Value"}), test_mock={ - "llm_call": lambda *args, **kwargs: json.dumps( - { - "key1": "key1Value", - "key2": "key2Value", - } + "llm_call": lambda *args, **kwargs: ( + json.dumps( + { + "key1": "key1Value", + "key2": "key2Value", + } + ), + 0, + 0, ) }, ) @staticmethod def llm_call( - api_key: str, model: LlmModel, prompt: list[dict], json_format: bool - ) -> str: - provider = model.metadata.provider + api_key: str, + llm_model: LlmModel, + prompt: list[dict], + json_format: bool, + max_tokens: int | None = None, + ) -> tuple[str, int, int]: + """ + Args: + api_key: API key for the LLM provider. + llm_model: The LLM model to use. + prompt: The prompt to send to the LLM. + json_format: Whether the response should be in JSON format. + max_tokens: The maximum number of tokens to generate in the chat completion. + + Returns: + The response from the LLM. + The number of tokens used in the prompt. + The number of tokens used in the completion. + """ + provider = llm_model.metadata.provider if provider == "openai": openai.api_key = api_key response_format = None - if model in [LlmModel.O1_MINI, LlmModel.O1_PREVIEW]: + if llm_model in [LlmModel.O1_MINI, LlmModel.O1_PREVIEW]: sys_messages = [p["content"] for p in prompt if p["role"] == "system"] usr_messages = [p["content"] for p in prompt if p["role"] != "system"] prompt = [ @@ -207,11 +248,17 @@ def llm_call( response_format = {"type": "json_object"} response = openai.chat.completions.create( - model=model.value, + model=llm_model.value, messages=prompt, # type: ignore response_format=response_format, # type: ignore + max_completion_tokens=max_tokens, + ) + + return ( + response.choices[0].message.content or "", + response.usage.prompt_tokens if response.usage else 0, + response.usage.completion_tokens if response.usage else 0, ) - return response.choices[0].message.content or "" elif provider == "anthropic": system_messages = [p["content"] for p in prompt if p["role"] == "system"] sysprompt = " ".join(system_messages) @@ -229,13 +276,18 @@ def llm_call( client = anthropic.Anthropic(api_key=api_key) try: - response = client.messages.create( - model=model.value, - max_tokens=4096, + resp = client.messages.create( + model=llm_model.value, system=sysprompt, messages=messages, + max_tokens=max_tokens or 8192, + ) + + return ( + resp.content[0].text if resp.content else "", + resp.usage.input_tokens, + resp.usage.output_tokens, ) - return response.content[0].text if response.content else "" except anthropic.APIError as e: error_message = f"Anthropic API error: {str(e)}" logger.error(error_message) @@ -244,23 +296,35 @@ def llm_call( client = Groq(api_key=api_key) response_format = {"type": "json_object"} if json_format else None response = client.chat.completions.create( - model=model.value, + model=llm_model.value, messages=prompt, # type: ignore response_format=response_format, # type: ignore + max_tokens=max_tokens, + ) + return ( + response.choices[0].message.content or "", + response.usage.prompt_tokens if response.usage else 0, + response.usage.completion_tokens if response.usage else 0, ) - return response.choices[0].message.content or "" elif provider == "ollama": + 
sys_messages = [p["content"] for p in prompt if p["role"] == "system"] + usr_messages = [p["content"] for p in prompt if p["role"] != "system"] response = ollama.generate( - model=model.value, - prompt=prompt[0]["content"], + model=llm_model.value, + prompt=f"{sys_messages}\n\n{usr_messages}", + stream=False, + ) + return ( + response.get("response") or "", + response.get("prompt_eval_count") or 0, + response.get("eval_count") or 0, ) - return response["response"] else: raise ValueError(f"Unsupported LLM provider: {provider}") def run(self, input_data: Input, **kwargs) -> BlockOutput: logger.debug(f"Calling LLM with input data: {input_data}") - prompt = [] + prompt = [p.model_dump() for p in input_data.conversation_history] def trim_prompt(s: str) -> str: lines = s.strip().split("\n") @@ -289,7 +353,8 @@ def trim_prompt(s: str) -> str: ) prompt.append({"role": "system", "content": sys_prompt}) - prompt.append({"role": "user", "content": input_data.prompt}) + if input_data.prompt: + prompt.append({"role": "user", "content": input_data.prompt}) def parse_response(resp: str) -> tuple[dict[str, Any], str | None]: try: @@ -305,19 +370,26 @@ def parse_response(resp: str) -> tuple[dict[str, Any], str | None]: logger.info(f"LLM request: {prompt}") retry_prompt = "" - model = input_data.model + llm_model = input_data.model api_key = ( input_data.api_key.get_secret_value() - or LlmApiKeys[model.metadata.provider].get_secret_value() + or LlmApiKeys[llm_model.metadata.provider].get_secret_value() ) for retry_count in range(input_data.retry): try: - response_text = self.llm_call( + response_text, input_token, output_token = self.llm_call( api_key=api_key, - model=model, + llm_model=llm_model, prompt=prompt, json_format=bool(input_data.expected_format), + max_tokens=input_data.max_tokens, + ) + self.merge_stats( + { + "input_token_count": input_token, + "output_token_count": output_token, + } ) logger.info(f"LLM attempt-{retry_count} response: {response_text}") @@ -354,8 +426,15 @@ def parse_response(resp: str) -> tuple[dict[str, Any], str | None]: ) prompt.append({"role": "user", "content": retry_prompt}) except Exception as e: - logger.error(f"Error calling LLM: {e}") + logger.exception(f"Error calling LLM: {e}") retry_prompt = f"Error calling LLM: {e}" + finally: + self.merge_stats( + { + "llm_call_count": retry_count + 1, + "llm_retry_count": retry_count, + } + ) raise RuntimeError(retry_prompt) @@ -386,6 +465,11 @@ class Input(BlockSchema): prompt_values: dict[str, str] = SchemaField( advanced=False, default={}, description="Values used to fill in the prompt." 
) + max_tokens: int | None = SchemaField( + advanced=True, + default=None, + description="The maximum number of tokens to generate in the chat completion.", + ) class Output(BlockSchema): response: str = SchemaField( @@ -405,15 +489,11 @@ def __init__(self): test_mock={"llm_call": lambda *args, **kwargs: "Response text"}, ) - @staticmethod - def llm_call(input_data: AIStructuredResponseGeneratorBlock.Input) -> str: - object_block = AIStructuredResponseGeneratorBlock() - for output_name, output_data in object_block.run(input_data): - if output_name == "response": - return output_data["response"] - else: - raise RuntimeError(output_data) - raise ValueError("Failed to get a response from the LLM.") + def llm_call(self, input_data: AIStructuredResponseGeneratorBlock.Input) -> str: + block = AIStructuredResponseGeneratorBlock() + response = block.run_once(input_data, "response") + self.merge_stats(block.execution_stats) + return response["response"] def run(self, input_data: Input, **kwargs) -> BlockOutput: object_input_data = AIStructuredResponseGeneratorBlock.Input( @@ -517,15 +597,11 @@ def _split_text(text: str, max_tokens: int, overlap: int) -> list[str]: return chunks - @staticmethod - def llm_call( - input_data: AIStructuredResponseGeneratorBlock.Input, - ) -> dict[str, str]: - llm_block = AIStructuredResponseGeneratorBlock() - for output_name, output_data in llm_block.run(input_data): - if output_name == "response": - return output_data - raise ValueError("Failed to get a response from the LLM.") + def llm_call(self, input_data: AIStructuredResponseGeneratorBlock.Input) -> dict: + block = AIStructuredResponseGeneratorBlock() + response = block.run_once(input_data, "response") + self.merge_stats(block.execution_stats) + return response def _summarize_chunk(self, chunk: str, input_data: Input) -> str: prompt = f"Summarize the following text in a {input_data.style} form. Focus your summary on the topic of `{input_data.focus}` if present, otherwise just provide a general summary:\n\n```{chunk}```" @@ -574,17 +650,6 @@ def _combine_summaries(self, summaries: list[str], input_data: Input) -> str: ] # Get the first yielded value -class MessageRole(str, Enum): - SYSTEM = "system" - USER = "user" - ASSISTANT = "assistant" - - -class Message(BlockSchema): - role: MessageRole - content: str - - class AIConversationBlock(Block): class Input(BlockSchema): messages: List[Message] = SchemaField( @@ -599,9 +664,9 @@ class Input(BlockSchema): value="", description="API key for the chosen language model provider." 
) max_tokens: int | None = SchemaField( + advanced=True, default=None, description="The maximum number of tokens to generate in the chat completion.", - ge=1, ) class Output(BlockSchema): @@ -639,62 +704,22 @@ def __init__(self): }, ) - @staticmethod - def llm_call( - api_key: str, - model: LlmModel, - messages: List[dict[str, str]], - max_tokens: int | None = None, - ) -> str: - provider = model.metadata.provider - - if provider == "openai": - openai.api_key = api_key - response = openai.chat.completions.create( - model=model.value, - messages=messages, # type: ignore - max_tokens=max_tokens, - ) - return response.choices[0].message.content or "" - elif provider == "anthropic": - client = anthropic.Anthropic(api_key=api_key) - response = client.messages.create( - model=model.value, - max_tokens=max_tokens or 4096, - messages=messages, # type: ignore - ) - return response.content[0].text if response.content else "" - elif provider == "groq": - client = Groq(api_key=api_key) - response = client.chat.completions.create( - model=model.value, - messages=messages, # type: ignore - max_tokens=max_tokens, - ) - return response.choices[0].message.content or "" - elif provider == "ollama": - response = ollama.chat( - model=model.value, - messages=messages, # type: ignore - stream=False, # type: ignore - ) - return response["message"]["content"] - else: - raise ValueError(f"Unsupported LLM provider: {provider}") + def llm_call(self, input_data: AIStructuredResponseGeneratorBlock.Input) -> str: + block = AIStructuredResponseGeneratorBlock() + response = block.run_once(input_data, "response") + self.merge_stats(block.execution_stats) + return response["response"] def run(self, input_data: Input, **kwargs) -> BlockOutput: - api_key = ( - input_data.api_key.get_secret_value() - or LlmApiKeys[input_data.model.metadata.provider].get_secret_value() - ) - - messages = [message.model_dump() for message in input_data.messages] - response = self.llm_call( - api_key=api_key, - model=input_data.model, - messages=messages, - max_tokens=input_data.max_tokens, + AIStructuredResponseGeneratorBlock.Input( + prompt="", + api_key=input_data.api_key, + model=input_data.model, + conversation_history=input_data.messages, + max_tokens=input_data.max_tokens, + expected_format={}, + ) ) yield "response", response @@ -727,6 +752,11 @@ class Input(BlockSchema): ge=1, le=5, ) + max_tokens: int | None = SchemaField( + advanced=True, + default=None, + description="The maximum number of tokens to generate in the chat completion.", + ) class Output(BlockSchema): generated_list: List[str] = SchemaField(description="The generated list.") @@ -781,11 +811,8 @@ def llm_call( input_data: AIStructuredResponseGeneratorBlock.Input, ) -> dict[str, str]: llm_block = AIStructuredResponseGeneratorBlock() - for output_name, output_data in llm_block.run(input_data): - if output_name == "response": - logger.debug(f"Received response from LLM: {output_data}") - return output_data - raise ValueError("Failed to get a response from the LLM.") + response = llm_block.run_once(input_data, "response") + return response @staticmethod def string_to_list(string): diff --git a/autogpt_platform/backend/backend/data/block.py b/autogpt_platform/backend/backend/data/block.py index 594fd10e7681..e89013b3b3f5 100644 --- a/autogpt_platform/backend/backend/data/block.py +++ b/autogpt_platform/backend/backend/data/block.py @@ -230,6 +230,11 @@ def __init__( self.disabled = disabled self.static_output = static_output self.block_type = block_type + self.execution_stats 
= {} + + @classmethod + def create(cls: Type["Block"]) -> "Block": + return cls() @abstractmethod def run(self, input_data: BlockSchemaInputType, **kwargs) -> BlockOutput: @@ -244,6 +249,26 @@ def run(self, input_data: BlockSchemaInputType, **kwargs) -> BlockOutput: """ pass + def run_once(self, input_data: BlockSchemaInputType, output: str, **kwargs) -> Any: + for name, data in self.run(input_data, **kwargs): + if name == output: + return data + raise ValueError(f"{self.name} did not produce any output for {output}") + + def merge_stats(self, stats: dict[str, Any]) -> dict[str, Any]: + for key, value in stats.items(): + if isinstance(value, dict): + self.execution_stats.setdefault(key, {}).update(value) + elif isinstance(value, (int, float)): + self.execution_stats.setdefault(key, 0) + self.execution_stats[key] += value + elif isinstance(value, list): + self.execution_stats.setdefault(key, []) + self.execution_stats[key].extend(value) + else: + self.execution_stats[key] = value + return self.execution_stats + @property def name(self): return self.__class__.__name__ @@ -282,14 +307,15 @@ def execute(self, input_data: BlockInput, **kwargs) -> BlockOutput: # ======================= Block Helper Functions ======================= # -def get_blocks() -> dict[str, Block]: +def get_blocks() -> dict[str, Type[Block]]: from backend.blocks import AVAILABLE_BLOCKS # noqa: E402 return AVAILABLE_BLOCKS async def initialize_blocks() -> None: - for block in get_blocks().values(): + for cls in get_blocks().values(): + block = cls() existing_block = await AgentBlock.prisma().find_first( where={"OR": [{"id": block.id}, {"name": block.name}]} ) @@ -324,4 +350,5 @@ async def initialize_blocks() -> None: def get_block(block_id: str) -> Block | None: - return get_blocks().get(block_id) + cls = get_blocks().get(block_id) + return cls() if cls else None diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index 4f1be1de1ed8..b4f8f8aeb739 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -257,7 +257,7 @@ def is_input_output_block(nid: str) -> bool: block = get_block(node.block_id) if not block: - blocks = {v.id: v.name for v in get_blocks().values()} + blocks = {v().id: v().name for v in get_blocks().values()} raise ValueError( f"{suffix}, {node.block_id} is invalid block id, available blocks: {blocks}" ) diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index 4e6cfe9e3bbb..ce6de9e9a623 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -104,6 +104,7 @@ def execute_node( Args: db_client: The client to send execution updates to the server. + creds_manager: The manager to acquire and release credentials. data: The execution data for executing the current node. execution_stats: The execution statistics to be updated. 
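NOTE (illustrative, not part of the patch): the new Block.run_once() and
Block.merge_stats() helpers above let a composite block delegate to another
block and still surface the inner block's usage stats; the next hunk shows the
executor folding node_block.execution_stats into the run's statistics. A
minimal usage sketch, where `inner_input` is assumed to be a prepared
AIStructuredResponseGeneratorBlock.Input:

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        inner = AIStructuredResponseGeneratorBlock()
        # First value the inner block yields for its "response" output;
        # raises ValueError if that output never appears.
        response = inner.run_once(inner_input, "response")
        # Accumulate the inner block's token/usage stats onto this block so
        # the executor can report them for the whole node.
        self.merge_stats(inner.execution_stats)
        yield "response", response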
@@ -209,6 +210,7 @@ def update_execution(status: ExecutionStatus) -> ExecutionResult: if creds_lock: creds_lock.release() if execution_stats is not None: + execution_stats.update(node_block.execution_stats) execution_stats["input_size"] = input_size execution_stats["output_size"] = output_size diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py index 7cb6988cfd9f..edeb025803c6 100644 --- a/autogpt_platform/backend/backend/server/rest_api.py +++ b/autogpt_platform/backend/backend/server/rest_api.py @@ -331,9 +331,9 @@ async def get_or_create_user_route(cls, user_data: dict = Depends(auth_middlewar @classmethod def get_graph_blocks(cls) -> list[dict[Any, Any]]: - blocks = block.get_blocks() + blocks = [cls() for cls in block.get_blocks().values()] costs = get_block_costs() - return [{**b.to_dict(), "costs": costs.get(b.id, [])} for b in blocks.values()] + return [{**b.to_dict(), "costs": costs.get(b.id, [])} for b in blocks] @classmethod def execute_graph_block( diff --git a/autogpt_platform/backend/test/block/test_block.py b/autogpt_platform/backend/test/block/test_block.py index be16a0b1a76e..48d2616f613e 100644 --- a/autogpt_platform/backend/test/block/test_block.py +++ b/autogpt_platform/backend/test/block/test_block.py @@ -1,3 +1,5 @@ +from typing import Type + import pytest from backend.data.block import Block, get_blocks @@ -5,5 +7,5 @@ @pytest.mark.parametrize("block", get_blocks().values(), ids=lambda b: b.name) -def test_available_blocks(block: Block): - execute_block_test(type(block)()) +def test_available_blocks(block: Type[Block]): + execute_block_test(block()) From e5f5005ab846ddf146ace1dc42a670532e9fe851 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Wed, 23 Oct 2024 07:39:15 +0700 Subject: [PATCH 17/24] fix(backend): Fix DatabaseManager usage by calling it on-demand --- .../store.py | 9 ++++-- .../autogpt_libs/utils}/cache.py | 0 .../backend/backend/executor/database.py | 6 +++- .../backend/backend/executor/manager.py | 15 ++++++---- .../backend/backend/executor/scheduler.py | 10 +++++-- .../backend/integrations/creds_manager.py | 7 ++--- .../backend/server/integrations/router.py | 3 +- .../backend/backend/server/rest_api.py | 15 +++++----- .../backend/backend/util/service.py | 28 +++++++++++++------ .../backend/test/executor/test_scheduler.py | 6 +--- .../backend/test/util/test_service.py | 8 ++++-- 11 files changed, 66 insertions(+), 41 deletions(-) rename autogpt_platform/{backend/backend/util => autogpt_libs/autogpt_libs/utils}/cache.py (100%) diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py index f4ce921937e7..8b539c15c58b 100644 --- a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py +++ b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py @@ -6,6 +6,7 @@ from redis import Redis from backend.executor.database import DatabaseManager +from autogpt_libs.utils.cache import thread_cached_property from autogpt_libs.utils.synchronize import RedisKeyedMutex from .types import ( @@ -18,9 +19,13 @@ class SupabaseIntegrationCredentialsStore: - def __init__(self, redis: "Redis", db: "DatabaseManager"): - self.db_manager: DatabaseManager = db + def __init__(self, redis: "Redis"): self.locks = RedisKeyedMutex(redis) + + @thread_cached_property + def db_manager(self) -> "DatabaseManager": + 
from backend.executor.database import DatabaseManager + return DatabaseManager.client def add_creds(self, user_id: str, credentials: Credentials) -> None: with self.locked_user_metadata(user_id): diff --git a/autogpt_platform/backend/backend/util/cache.py b/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py similarity index 100% rename from autogpt_platform/backend/backend/util/cache.py rename to autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index aea2dd4177fc..b404ac6ecf17 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ b/autogpt_platform/backend/backend/executor/database.py @@ -27,11 +27,15 @@ class DatabaseManager(AppService): def __init__(self): - super().__init__(port=Config().database_api_port) + super().__init__() self.use_db = True self.use_redis = True self.event_queue = RedisEventQueue() + @classmethod + def get_port(cls) -> int: + return Config().database_api_port + @expose def send_execution_update(self, execution_result_dict: dict[Any, Any]): self.event_queue.put(ExecutionResult(**execution_result_dict)) diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index ce6de9e9a623..033d2bec3de1 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -16,6 +16,8 @@ if TYPE_CHECKING: from backend.executor import DatabaseManager +from autogpt_libs.utils.cache import thread_cached + from backend.data import redis from backend.data.block import Block, BlockData, BlockInput, BlockType, get_block from backend.data.execution import ( @@ -31,7 +33,6 @@ from backend.data.model import CREDENTIALS_FIELD_NAME, CredentialsMetaInput from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.util import json -from backend.util.cache import thread_cached from backend.util.decorator import error_logged, time_measured from backend.util.logging import configure_logging from backend.util.process import set_service_name @@ -419,7 +420,7 @@ def on_node_executor_start(cls): redis.connect() cls.pid = os.getpid() cls.db_client = get_db_client() - cls.creds_manager = IntegrationCredentialsManager(db_manager=cls.db_client) + cls.creds_manager = IntegrationCredentialsManager() # Set up shutdown handlers cls.shutdown_lock = threading.Lock() @@ -659,20 +660,24 @@ def callback(_): class ExecutionManager(AppService): def __init__(self): - super().__init__(port=settings.config.execution_manager_port) + super().__init__() self.use_redis = True self.use_supabase = True self.pool_size = settings.config.num_graph_workers self.queue = ExecutionQueue[GraphExecution]() self.active_graph_runs: dict[str, tuple[Future, threading.Event]] = {} + @classmethod + def get_port(cls) -> int: + return settings.config.execution_manager_port + def run_service(self): from autogpt_libs.supabase_integration_credentials_store import ( SupabaseIntegrationCredentialsStore, ) self.credentials_store = SupabaseIntegrationCredentialsStore( - redis=redis.get_redis(), db=self.db_client + redis=redis.get_redis() ) self.executor = ProcessPoolExecutor( max_workers=self.pool_size, @@ -863,7 +868,7 @@ def _validate_node_input_credentials(self, graph: Graph, user_id: str): def get_db_client() -> "DatabaseManager": from backend.executor import DatabaseManager - return get_service_client(DatabaseManager, 
settings.config.database_api_port) + return get_service_client(DatabaseManager) @contextmanager diff --git a/autogpt_platform/backend/backend/executor/scheduler.py b/autogpt_platform/backend/backend/executor/scheduler.py index 574765c34845..979631c0585e 100644 --- a/autogpt_platform/backend/backend/executor/scheduler.py +++ b/autogpt_platform/backend/backend/executor/scheduler.py @@ -4,6 +4,7 @@ from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.triggers.cron import CronTrigger +from autogpt_libs.utils.cache import thread_cached_property from backend.data.block import BlockInput from backend.data.schedule import ( @@ -14,7 +15,6 @@ update_schedule, ) from backend.executor.manager import ExecutionManager -from backend.util.cache import thread_cached_property from backend.util.service import AppService, expose, get_service_client from backend.util.settings import Config @@ -28,14 +28,18 @@ def log(msg, **kwargs): class ExecutionScheduler(AppService): def __init__(self, refresh_interval=10): - super().__init__(port=Config().execution_scheduler_port) + super().__init__() self.use_db = True self.last_check = datetime.min self.refresh_interval = refresh_interval + @classmethod + def get_port(cls) -> int: + return Config().execution_scheduler_port + @thread_cached_property def execution_client(self) -> ExecutionManager: - return get_service_client(ExecutionManager, Config().execution_manager_port) + return get_service_client(ExecutionManager) def run_service(self): scheduler = BackgroundScheduler() diff --git a/autogpt_platform/backend/backend/integrations/creds_manager.py b/autogpt_platform/backend/backend/integrations/creds_manager.py index 3bfde70e2817..96f9d1a3c56d 100644 --- a/autogpt_platform/backend/backend/integrations/creds_manager.py +++ b/autogpt_platform/backend/backend/integrations/creds_manager.py @@ -10,7 +10,6 @@ from redis.lock import Lock as RedisLock from backend.data import redis -from backend.executor.database import DatabaseManager from backend.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler from backend.util.settings import Settings @@ -50,12 +49,10 @@ class IntegrationCredentialsManager: cause so much latency that it's worth implementing. 
""" - def __init__(self, db_manager: DatabaseManager): + def __init__(self): redis_conn = redis.get_redis() self._locks = RedisKeyedMutex(redis_conn) - self.store = SupabaseIntegrationCredentialsStore( - redis=redis_conn, db=db_manager - ) + self.store = SupabaseIntegrationCredentialsStore(redis=redis_conn) def create(self, user_id: str, credentials: Credentials) -> None: return self.store.add_creds(user_id, credentials) diff --git a/autogpt_platform/backend/backend/server/integrations/router.py b/autogpt_platform/backend/backend/server/integrations/router.py index 440ce7921cbe..5163de0b2fa3 100644 --- a/autogpt_platform/backend/backend/server/integrations/router.py +++ b/autogpt_platform/backend/backend/server/integrations/router.py @@ -10,7 +10,6 @@ from fastapi import APIRouter, Body, Depends, HTTPException, Path, Query, Request from pydantic import BaseModel, Field, SecretStr -from backend.executor.manager import get_db_client from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler from backend.util.settings import Settings @@ -21,7 +20,7 @@ settings = Settings() router = APIRouter() -creds_manager = IntegrationCredentialsManager(db_manager=get_db_client()) +creds_manager = IntegrationCredentialsManager() class LoginResponse(BaseModel): diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py index edeb025803c6..f0d922c19686 100644 --- a/autogpt_platform/backend/backend/server/rest_api.py +++ b/autogpt_platform/backend/backend/server/rest_api.py @@ -7,6 +7,7 @@ import uvicorn from autogpt_libs.auth.middleware import auth_middleware +from autogpt_libs.utils.cache import thread_cached_property from fastapi import APIRouter, Body, Depends, FastAPI, HTTPException, Request from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import JSONResponse @@ -19,10 +20,7 @@ from backend.data.credit import get_block_costs, get_user_credit_model from backend.data.user import get_or_create_user from backend.executor import ExecutionManager, ExecutionScheduler -from backend.executor.manager import get_db_client -from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.server.model import CreateGraph, SetGraphActiveVersion -from backend.util.cache import thread_cached_property from backend.util.service import AppService, get_service_client from backend.util.settings import AppEnvironment, Config, Settings @@ -37,9 +35,13 @@ class AgentServer(AppService): _user_credit_model = get_user_credit_model() def __init__(self): - super().__init__(port=Config().agent_server_port) + super().__init__() self.use_redis = True + @classmethod + def get_port(cls) -> int: + return Config().agent_server_port + @asynccontextmanager async def lifespan(self, _: FastAPI): await db.connect() @@ -98,7 +100,6 @@ def run_service(self): tags=["integrations"], dependencies=[Depends(auth_middleware)], ) - self.integration_creds_manager = IntegrationCredentialsManager(get_db_client()) api_router.include_router( backend.server.routers.analytics.router, @@ -308,11 +309,11 @@ async def wrapper(*args, **kwargs): @thread_cached_property def execution_manager_client(self) -> ExecutionManager: - return get_service_client(ExecutionManager, Config().execution_manager_port) + return get_service_client(ExecutionManager) @thread_cached_property def execution_scheduler_client(self) -> ExecutionScheduler: - return 
get_service_client(ExecutionScheduler, Config().execution_scheduler_port) + return get_service_client(ExecutionScheduler) @classmethod def handle_internal_http_error(cls, request: Request, exc: Exception): diff --git a/autogpt_platform/backend/backend/util/service.py b/autogpt_platform/backend/backend/util/service.py index 3c4532b5ad00..e1f742483a64 100644 --- a/autogpt_platform/backend/backend/util/service.py +++ b/autogpt_platform/backend/backend/util/service.py @@ -5,6 +5,7 @@ import threading import time import typing +from abc import ABC, abstractmethod from enum import Enum from types import NoneType, UnionType from typing import ( @@ -99,16 +100,24 @@ def custom_dict_to_class(qualname, data: dict): return custom_dict_to_class -class AppService(AppProcess): +class AppService(AppProcess, ABC): shared_event_loop: asyncio.AbstractEventLoop use_db: bool = False use_redis: bool = False use_supabase: bool = False - def __init__(self, port): - self.port = port + def __init__(self): self.uri = None + @classmethod + @abstractmethod + def get_port(cls) -> int: + pass + + @classmethod + def get_host(cls) -> str: + return os.environ.get(f"{cls.service_name.upper()}_HOST", Config().pyro_host) + def run_service(self) -> None: while True: time.sleep(10) @@ -157,8 +166,7 @@ def cleanup(self): @conn_retry("Pyro", "Starting Pyro Service") def __start_pyro(self): - host = Config().pyro_host - daemon = Pyro5.api.Daemon(host=host, port=self.port) + daemon = Pyro5.api.Daemon(host=self.get_host(), port=self.get_port()) self.uri = daemon.register(self, objectId=self.service_name) logger.info(f"[{self.service_name}] Connected to Pyro; URI = {self.uri}") daemon.requestLoop() @@ -167,16 +175,20 @@ def __start_async_loop(self): self.shared_event_loop.run_forever() +# --------- UTILITIES --------- # + + AS = TypeVar("AS", bound=AppService) -def get_service_client(service_type: Type[AS], port: int) -> AS: +def get_service_client(service_type: Type[AS]) -> AS: service_name = service_type.service_name class DynamicClient: @conn_retry("Pyro", f"Connecting to [{service_name}]") def __init__(self): - host = os.environ.get(f"{service_name.upper()}_HOST", "localhost") + host = service_type.get_host() + port = service_type.get_port() uri = f"PYRO:{service_type.service_name}@{host}:{port}" logger.debug(f"Connecting to service [{service_name}]. 
URI = {uri}") self.proxy = Pyro5.api.Proxy(uri) @@ -191,8 +203,6 @@ def __getattr__(self, name: str) -> Callable[..., Any]: return cast(AS, DynamicClient()) -# --------- UTILITIES --------- # - builtin_types = [*vars(builtins).values(), NoneType, Enum] diff --git a/autogpt_platform/backend/test/executor/test_scheduler.py b/autogpt_platform/backend/test/executor/test_scheduler.py index c0bcc8307925..49e46510a120 100644 --- a/autogpt_platform/backend/test/executor/test_scheduler.py +++ b/autogpt_platform/backend/test/executor/test_scheduler.py @@ -5,7 +5,6 @@ from backend.server.model import CreateGraph from backend.usecases.sample import create_test_graph, create_test_user from backend.util.service import get_service_client -from backend.util.settings import Config from backend.util.test import SpinTestServer @@ -19,10 +18,7 @@ async def test_agent_schedule(server: SpinTestServer): user_id=test_user.id, ) - scheduler = get_service_client( - ExecutionScheduler, Config().execution_scheduler_port - ) - + scheduler = get_service_client(ExecutionScheduler) schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id) assert len(schedules) == 0 diff --git a/autogpt_platform/backend/test/util/test_service.py b/autogpt_platform/backend/test/util/test_service.py index e03063fff32d..a20810dbb1e5 100644 --- a/autogpt_platform/backend/test/util/test_service.py +++ b/autogpt_platform/backend/test/util/test_service.py @@ -7,7 +7,11 @@ class ServiceTest(AppService): def __init__(self): - super().__init__(port=TEST_SERVICE_PORT) + super().__init__() + + @classmethod + def get_port(cls) -> int: + return TEST_SERVICE_PORT @expose def add(self, a: int, b: int) -> int: @@ -28,7 +32,7 @@ async def add_async(a: int, b: int) -> int: @pytest.mark.asyncio(scope="session") async def test_service_creation(server): with ServiceTest(): - client = get_service_client(ServiceTest, TEST_SERVICE_PORT) + client = get_service_client(ServiceTest) assert client.add(5, 3) == 8 assert client.subtract(10, 4) == 6 assert client.fun_with_async(5, 3) == 8 From 7f318685afe4c1413f7db45714ab6d9921b575ee Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Wed, 23 Oct 2024 07:41:59 +0700 Subject: [PATCH 18/24] Revert "fix(backend): Fix DatabaseManager usage by calling it on-demand" This reverts commit e5f5005ab846ddf146ace1dc42a670532e9fe851. 
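NOTE (illustrative, not part of the patch): assuming a standard git workflow,
the inverse diff below is what `git revert e5f5005ab846ddf146ace1dc42a670532e9fe851`
produces. Patch 19/24 then re-lands the change, swapping the
`DatabaseManager.client` lookup attempted in patch 17/24 for
`get_service_client(DatabaseManager)`. The `thread_cached_property` helper both
attempts rely on is not shown in this series; a rough per-thread, per-instance
memoization sketch of the idea (the real autogpt_libs implementation may
differ):

    import threading

    def thread_cached_property(func):
        local = threading.local()

        def getter(self):
            # One cache dict per thread; entries are keyed by instance id so
            # each object computes the value at most once per thread.
            cache = getattr(local, "cache", None)
            if cache is None:
                cache = local.cache = {}
            if id(self) not in cache:
                cache[id(self)] = func(self)
            return cache[id(self)]

        return property(getter)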
--- .../store.py | 9 ++---- .../backend/backend/executor/database.py | 6 +--- .../backend/backend/executor/manager.py | 15 ++++------ .../backend/backend/executor/scheduler.py | 10 ++----- .../backend/integrations/creds_manager.py | 7 +++-- .../backend/server/integrations/router.py | 3 +- .../backend/backend/server/rest_api.py | 15 +++++----- .../utils => backend/backend/util}/cache.py | 0 .../backend/backend/util/service.py | 28 ++++++------------- .../backend/test/executor/test_scheduler.py | 6 +++- .../backend/test/util/test_service.py | 8 ++---- 11 files changed, 41 insertions(+), 66 deletions(-) rename autogpt_platform/{autogpt_libs/autogpt_libs/utils => backend/backend/util}/cache.py (100%) diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py index 8b539c15c58b..f4ce921937e7 100644 --- a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py +++ b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py @@ -6,7 +6,6 @@ from redis import Redis from backend.executor.database import DatabaseManager -from autogpt_libs.utils.cache import thread_cached_property from autogpt_libs.utils.synchronize import RedisKeyedMutex from .types import ( @@ -19,13 +18,9 @@ class SupabaseIntegrationCredentialsStore: - def __init__(self, redis: "Redis"): + def __init__(self, redis: "Redis", db: "DatabaseManager"): + self.db_manager: DatabaseManager = db self.locks = RedisKeyedMutex(redis) - - @thread_cached_property - def db_manager(self) -> "DatabaseManager": - from backend.executor.database import DatabaseManager - return DatabaseManager.client def add_creds(self, user_id: str, credentials: Credentials) -> None: with self.locked_user_metadata(user_id): diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index b404ac6ecf17..aea2dd4177fc 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ b/autogpt_platform/backend/backend/executor/database.py @@ -27,15 +27,11 @@ class DatabaseManager(AppService): def __init__(self): - super().__init__() + super().__init__(port=Config().database_api_port) self.use_db = True self.use_redis = True self.event_queue = RedisEventQueue() - @classmethod - def get_port(cls) -> int: - return Config().database_api_port - @expose def send_execution_update(self, execution_result_dict: dict[Any, Any]): self.event_queue.put(ExecutionResult(**execution_result_dict)) diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index 033d2bec3de1..ce6de9e9a623 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -16,8 +16,6 @@ if TYPE_CHECKING: from backend.executor import DatabaseManager -from autogpt_libs.utils.cache import thread_cached - from backend.data import redis from backend.data.block import Block, BlockData, BlockInput, BlockType, get_block from backend.data.execution import ( @@ -33,6 +31,7 @@ from backend.data.model import CREDENTIALS_FIELD_NAME, CredentialsMetaInput from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.util import json +from backend.util.cache import thread_cached from backend.util.decorator import error_logged, time_measured from backend.util.logging import configure_logging from 
backend.util.process import set_service_name @@ -420,7 +419,7 @@ def on_node_executor_start(cls): redis.connect() cls.pid = os.getpid() cls.db_client = get_db_client() - cls.creds_manager = IntegrationCredentialsManager() + cls.creds_manager = IntegrationCredentialsManager(db_manager=cls.db_client) # Set up shutdown handlers cls.shutdown_lock = threading.Lock() @@ -660,24 +659,20 @@ def callback(_): class ExecutionManager(AppService): def __init__(self): - super().__init__() + super().__init__(port=settings.config.execution_manager_port) self.use_redis = True self.use_supabase = True self.pool_size = settings.config.num_graph_workers self.queue = ExecutionQueue[GraphExecution]() self.active_graph_runs: dict[str, tuple[Future, threading.Event]] = {} - @classmethod - def get_port(cls) -> int: - return settings.config.execution_manager_port - def run_service(self): from autogpt_libs.supabase_integration_credentials_store import ( SupabaseIntegrationCredentialsStore, ) self.credentials_store = SupabaseIntegrationCredentialsStore( - redis=redis.get_redis() + redis=redis.get_redis(), db=self.db_client ) self.executor = ProcessPoolExecutor( max_workers=self.pool_size, @@ -868,7 +863,7 @@ def _validate_node_input_credentials(self, graph: Graph, user_id: str): def get_db_client() -> "DatabaseManager": from backend.executor import DatabaseManager - return get_service_client(DatabaseManager) + return get_service_client(DatabaseManager, settings.config.database_api_port) @contextmanager diff --git a/autogpt_platform/backend/backend/executor/scheduler.py b/autogpt_platform/backend/backend/executor/scheduler.py index 979631c0585e..574765c34845 100644 --- a/autogpt_platform/backend/backend/executor/scheduler.py +++ b/autogpt_platform/backend/backend/executor/scheduler.py @@ -4,7 +4,6 @@ from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.triggers.cron import CronTrigger -from autogpt_libs.utils.cache import thread_cached_property from backend.data.block import BlockInput from backend.data.schedule import ( @@ -15,6 +14,7 @@ update_schedule, ) from backend.executor.manager import ExecutionManager +from backend.util.cache import thread_cached_property from backend.util.service import AppService, expose, get_service_client from backend.util.settings import Config @@ -28,18 +28,14 @@ def log(msg, **kwargs): class ExecutionScheduler(AppService): def __init__(self, refresh_interval=10): - super().__init__() + super().__init__(port=Config().execution_scheduler_port) self.use_db = True self.last_check = datetime.min self.refresh_interval = refresh_interval - @classmethod - def get_port(cls) -> int: - return Config().execution_scheduler_port - @thread_cached_property def execution_client(self) -> ExecutionManager: - return get_service_client(ExecutionManager) + return get_service_client(ExecutionManager, Config().execution_manager_port) def run_service(self): scheduler = BackgroundScheduler() diff --git a/autogpt_platform/backend/backend/integrations/creds_manager.py b/autogpt_platform/backend/backend/integrations/creds_manager.py index 96f9d1a3c56d..3bfde70e2817 100644 --- a/autogpt_platform/backend/backend/integrations/creds_manager.py +++ b/autogpt_platform/backend/backend/integrations/creds_manager.py @@ -10,6 +10,7 @@ from redis.lock import Lock as RedisLock from backend.data import redis +from backend.executor.database import DatabaseManager from backend.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler from backend.util.settings import Settings @@ -49,10 +50,12 @@ 
class IntegrationCredentialsManager: cause so much latency that it's worth implementing. """ - def __init__(self): + def __init__(self, db_manager: DatabaseManager): redis_conn = redis.get_redis() self._locks = RedisKeyedMutex(redis_conn) - self.store = SupabaseIntegrationCredentialsStore(redis=redis_conn) + self.store = SupabaseIntegrationCredentialsStore( + redis=redis_conn, db=db_manager + ) def create(self, user_id: str, credentials: Credentials) -> None: return self.store.add_creds(user_id, credentials) diff --git a/autogpt_platform/backend/backend/server/integrations/router.py b/autogpt_platform/backend/backend/server/integrations/router.py index 5163de0b2fa3..440ce7921cbe 100644 --- a/autogpt_platform/backend/backend/server/integrations/router.py +++ b/autogpt_platform/backend/backend/server/integrations/router.py @@ -10,6 +10,7 @@ from fastapi import APIRouter, Body, Depends, HTTPException, Path, Query, Request from pydantic import BaseModel, Field, SecretStr +from backend.executor.manager import get_db_client from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler from backend.util.settings import Settings @@ -20,7 +21,7 @@ settings = Settings() router = APIRouter() -creds_manager = IntegrationCredentialsManager() +creds_manager = IntegrationCredentialsManager(db_manager=get_db_client()) class LoginResponse(BaseModel): diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py index f0d922c19686..edeb025803c6 100644 --- a/autogpt_platform/backend/backend/server/rest_api.py +++ b/autogpt_platform/backend/backend/server/rest_api.py @@ -7,7 +7,6 @@ import uvicorn from autogpt_libs.auth.middleware import auth_middleware -from autogpt_libs.utils.cache import thread_cached_property from fastapi import APIRouter, Body, Depends, FastAPI, HTTPException, Request from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import JSONResponse @@ -20,7 +19,10 @@ from backend.data.credit import get_block_costs, get_user_credit_model from backend.data.user import get_or_create_user from backend.executor import ExecutionManager, ExecutionScheduler +from backend.executor.manager import get_db_client +from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.server.model import CreateGraph, SetGraphActiveVersion +from backend.util.cache import thread_cached_property from backend.util.service import AppService, get_service_client from backend.util.settings import AppEnvironment, Config, Settings @@ -35,13 +37,9 @@ class AgentServer(AppService): _user_credit_model = get_user_credit_model() def __init__(self): - super().__init__() + super().__init__(port=Config().agent_server_port) self.use_redis = True - @classmethod - def get_port(cls) -> int: - return Config().agent_server_port - @asynccontextmanager async def lifespan(self, _: FastAPI): await db.connect() @@ -100,6 +98,7 @@ def run_service(self): tags=["integrations"], dependencies=[Depends(auth_middleware)], ) + self.integration_creds_manager = IntegrationCredentialsManager(get_db_client()) api_router.include_router( backend.server.routers.analytics.router, @@ -309,11 +308,11 @@ async def wrapper(*args, **kwargs): @thread_cached_property def execution_manager_client(self) -> ExecutionManager: - return get_service_client(ExecutionManager) + return get_service_client(ExecutionManager, Config().execution_manager_port) @thread_cached_property def 
execution_scheduler_client(self) -> ExecutionScheduler: - return get_service_client(ExecutionScheduler) + return get_service_client(ExecutionScheduler, Config().execution_scheduler_port) @classmethod def handle_internal_http_error(cls, request: Request, exc: Exception): diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py b/autogpt_platform/backend/backend/util/cache.py similarity index 100% rename from autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py rename to autogpt_platform/backend/backend/util/cache.py diff --git a/autogpt_platform/backend/backend/util/service.py b/autogpt_platform/backend/backend/util/service.py index e1f742483a64..3c4532b5ad00 100644 --- a/autogpt_platform/backend/backend/util/service.py +++ b/autogpt_platform/backend/backend/util/service.py @@ -5,7 +5,6 @@ import threading import time import typing -from abc import ABC, abstractmethod from enum import Enum from types import NoneType, UnionType from typing import ( @@ -100,24 +99,16 @@ def custom_dict_to_class(qualname, data: dict): return custom_dict_to_class -class AppService(AppProcess, ABC): +class AppService(AppProcess): shared_event_loop: asyncio.AbstractEventLoop use_db: bool = False use_redis: bool = False use_supabase: bool = False - def __init__(self): + def __init__(self, port): + self.port = port self.uri = None - @classmethod - @abstractmethod - def get_port(cls) -> int: - pass - - @classmethod - def get_host(cls) -> str: - return os.environ.get(f"{cls.service_name.upper()}_HOST", Config().pyro_host) - def run_service(self) -> None: while True: time.sleep(10) @@ -166,7 +157,8 @@ def cleanup(self): @conn_retry("Pyro", "Starting Pyro Service") def __start_pyro(self): - daemon = Pyro5.api.Daemon(host=self.get_host(), port=self.get_port()) + host = Config().pyro_host + daemon = Pyro5.api.Daemon(host=host, port=self.port) self.uri = daemon.register(self, objectId=self.service_name) logger.info(f"[{self.service_name}] Connected to Pyro; URI = {self.uri}") daemon.requestLoop() @@ -175,20 +167,16 @@ def __start_async_loop(self): self.shared_event_loop.run_forever() -# --------- UTILITIES --------- # - - AS = TypeVar("AS", bound=AppService) -def get_service_client(service_type: Type[AS]) -> AS: +def get_service_client(service_type: Type[AS], port: int) -> AS: service_name = service_type.service_name class DynamicClient: @conn_retry("Pyro", f"Connecting to [{service_name}]") def __init__(self): - host = service_type.get_host() - port = service_type.get_port() + host = os.environ.get(f"{service_name.upper()}_HOST", "localhost") uri = f"PYRO:{service_type.service_name}@{host}:{port}" logger.debug(f"Connecting to service [{service_name}]. 
URI = {uri}") self.proxy = Pyro5.api.Proxy(uri) @@ -203,6 +191,8 @@ def __getattr__(self, name: str) -> Callable[..., Any]: return cast(AS, DynamicClient()) +# --------- UTILITIES --------- # + builtin_types = [*vars(builtins).values(), NoneType, Enum] diff --git a/autogpt_platform/backend/test/executor/test_scheduler.py b/autogpt_platform/backend/test/executor/test_scheduler.py index 49e46510a120..c0bcc8307925 100644 --- a/autogpt_platform/backend/test/executor/test_scheduler.py +++ b/autogpt_platform/backend/test/executor/test_scheduler.py @@ -5,6 +5,7 @@ from backend.server.model import CreateGraph from backend.usecases.sample import create_test_graph, create_test_user from backend.util.service import get_service_client +from backend.util.settings import Config from backend.util.test import SpinTestServer @@ -18,7 +19,10 @@ async def test_agent_schedule(server: SpinTestServer): user_id=test_user.id, ) - scheduler = get_service_client(ExecutionScheduler) + scheduler = get_service_client( + ExecutionScheduler, Config().execution_scheduler_port + ) + schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id) assert len(schedules) == 0 diff --git a/autogpt_platform/backend/test/util/test_service.py b/autogpt_platform/backend/test/util/test_service.py index a20810dbb1e5..e03063fff32d 100644 --- a/autogpt_platform/backend/test/util/test_service.py +++ b/autogpt_platform/backend/test/util/test_service.py @@ -7,11 +7,7 @@ class ServiceTest(AppService): def __init__(self): - super().__init__() - - @classmethod - def get_port(cls) -> int: - return TEST_SERVICE_PORT + super().__init__(port=TEST_SERVICE_PORT) @expose def add(self, a: int, b: int) -> int: @@ -32,7 +28,7 @@ async def add_async(a: int, b: int) -> int: @pytest.mark.asyncio(scope="session") async def test_service_creation(server): with ServiceTest(): - client = get_service_client(ServiceTest) + client = get_service_client(ServiceTest, TEST_SERVICE_PORT) assert client.add(5, 3) == 8 assert client.subtract(10, 4) == 6 assert client.fun_with_async(5, 3) == 8 From 17e79ad88df5e19345d66d46e513626b62d66c5a Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Wed, 23 Oct 2024 06:09:23 +0300 Subject: [PATCH 19/24] fix(backend): Fix DatabaseManager usage by calling it on-demand (#8404) --- .../store.py | 10 +++++-- .../autogpt_libs/utils}/cache.py | 0 .../backend/backend/executor/database.py | 6 +++- .../backend/backend/executor/manager.py | 15 ++++++---- .../backend/backend/executor/scheduler.py | 10 +++++-- .../backend/integrations/creds_manager.py | 7 ++--- .../backend/server/integrations/router.py | 3 +- .../backend/backend/server/rest_api.py | 15 +++++----- .../backend/backend/util/service.py | 28 +++++++++++++------ .../backend/test/executor/test_scheduler.py | 6 +--- .../backend/test/util/test_service.py | 8 ++++-- 11 files changed, 67 insertions(+), 41 deletions(-) rename autogpt_platform/{backend/backend/util => autogpt_libs/autogpt_libs/utils}/cache.py (100%) diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py index f4ce921937e7..787683623313 100644 --- a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py +++ b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py @@ -6,6 +6,7 @@ from redis import Redis from backend.executor.database import DatabaseManager +from autogpt_libs.utils.cache import 
thread_cached_property from autogpt_libs.utils.synchronize import RedisKeyedMutex from .types import ( @@ -18,9 +19,14 @@ class SupabaseIntegrationCredentialsStore: - def __init__(self, redis: "Redis", db: "DatabaseManager"): - self.db_manager: DatabaseManager = db + def __init__(self, redis: "Redis"): self.locks = RedisKeyedMutex(redis) + + @thread_cached_property + def db_manager(self) -> "DatabaseManager": + from backend.executor.database import DatabaseManager + from backend.util.service import get_service_client + return get_service_client(DatabaseManager) def add_creds(self, user_id: str, credentials: Credentials) -> None: with self.locked_user_metadata(user_id): diff --git a/autogpt_platform/backend/backend/util/cache.py b/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py similarity index 100% rename from autogpt_platform/backend/backend/util/cache.py rename to autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index aea2dd4177fc..b404ac6ecf17 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ b/autogpt_platform/backend/backend/executor/database.py @@ -27,11 +27,15 @@ class DatabaseManager(AppService): def __init__(self): - super().__init__(port=Config().database_api_port) + super().__init__() self.use_db = True self.use_redis = True self.event_queue = RedisEventQueue() + @classmethod + def get_port(cls) -> int: + return Config().database_api_port + @expose def send_execution_update(self, execution_result_dict: dict[Any, Any]): self.event_queue.put(ExecutionResult(**execution_result_dict)) diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index ce6de9e9a623..033d2bec3de1 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -16,6 +16,8 @@ if TYPE_CHECKING: from backend.executor import DatabaseManager +from autogpt_libs.utils.cache import thread_cached + from backend.data import redis from backend.data.block import Block, BlockData, BlockInput, BlockType, get_block from backend.data.execution import ( @@ -31,7 +33,6 @@ from backend.data.model import CREDENTIALS_FIELD_NAME, CredentialsMetaInput from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.util import json -from backend.util.cache import thread_cached from backend.util.decorator import error_logged, time_measured from backend.util.logging import configure_logging from backend.util.process import set_service_name @@ -419,7 +420,7 @@ def on_node_executor_start(cls): redis.connect() cls.pid = os.getpid() cls.db_client = get_db_client() - cls.creds_manager = IntegrationCredentialsManager(db_manager=cls.db_client) + cls.creds_manager = IntegrationCredentialsManager() # Set up shutdown handlers cls.shutdown_lock = threading.Lock() @@ -659,20 +660,24 @@ def callback(_): class ExecutionManager(AppService): def __init__(self): - super().__init__(port=settings.config.execution_manager_port) + super().__init__() self.use_redis = True self.use_supabase = True self.pool_size = settings.config.num_graph_workers self.queue = ExecutionQueue[GraphExecution]() self.active_graph_runs: dict[str, tuple[Future, threading.Event]] = {} + @classmethod + def get_port(cls) -> int: + return settings.config.execution_manager_port + def run_service(self): from autogpt_libs.supabase_integration_credentials_store import ( 
SupabaseIntegrationCredentialsStore, ) self.credentials_store = SupabaseIntegrationCredentialsStore( - redis=redis.get_redis(), db=self.db_client + redis=redis.get_redis() ) self.executor = ProcessPoolExecutor( max_workers=self.pool_size, @@ -863,7 +868,7 @@ def _validate_node_input_credentials(self, graph: Graph, user_id: str): def get_db_client() -> "DatabaseManager": from backend.executor import DatabaseManager - return get_service_client(DatabaseManager, settings.config.database_api_port) + return get_service_client(DatabaseManager) @contextmanager diff --git a/autogpt_platform/backend/backend/executor/scheduler.py b/autogpt_platform/backend/backend/executor/scheduler.py index 574765c34845..979631c0585e 100644 --- a/autogpt_platform/backend/backend/executor/scheduler.py +++ b/autogpt_platform/backend/backend/executor/scheduler.py @@ -4,6 +4,7 @@ from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.triggers.cron import CronTrigger +from autogpt_libs.utils.cache import thread_cached_property from backend.data.block import BlockInput from backend.data.schedule import ( @@ -14,7 +15,6 @@ update_schedule, ) from backend.executor.manager import ExecutionManager -from backend.util.cache import thread_cached_property from backend.util.service import AppService, expose, get_service_client from backend.util.settings import Config @@ -28,14 +28,18 @@ def log(msg, **kwargs): class ExecutionScheduler(AppService): def __init__(self, refresh_interval=10): - super().__init__(port=Config().execution_scheduler_port) + super().__init__() self.use_db = True self.last_check = datetime.min self.refresh_interval = refresh_interval + @classmethod + def get_port(cls) -> int: + return Config().execution_scheduler_port + @thread_cached_property def execution_client(self) -> ExecutionManager: - return get_service_client(ExecutionManager, Config().execution_manager_port) + return get_service_client(ExecutionManager) def run_service(self): scheduler = BackgroundScheduler() diff --git a/autogpt_platform/backend/backend/integrations/creds_manager.py b/autogpt_platform/backend/backend/integrations/creds_manager.py index 3bfde70e2817..96f9d1a3c56d 100644 --- a/autogpt_platform/backend/backend/integrations/creds_manager.py +++ b/autogpt_platform/backend/backend/integrations/creds_manager.py @@ -10,7 +10,6 @@ from redis.lock import Lock as RedisLock from backend.data import redis -from backend.executor.database import DatabaseManager from backend.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler from backend.util.settings import Settings @@ -50,12 +49,10 @@ class IntegrationCredentialsManager: cause so much latency that it's worth implementing. 
""" - def __init__(self, db_manager: DatabaseManager): + def __init__(self): redis_conn = redis.get_redis() self._locks = RedisKeyedMutex(redis_conn) - self.store = SupabaseIntegrationCredentialsStore( - redis=redis_conn, db=db_manager - ) + self.store = SupabaseIntegrationCredentialsStore(redis=redis_conn) def create(self, user_id: str, credentials: Credentials) -> None: return self.store.add_creds(user_id, credentials) diff --git a/autogpt_platform/backend/backend/server/integrations/router.py b/autogpt_platform/backend/backend/server/integrations/router.py index 440ce7921cbe..5163de0b2fa3 100644 --- a/autogpt_platform/backend/backend/server/integrations/router.py +++ b/autogpt_platform/backend/backend/server/integrations/router.py @@ -10,7 +10,6 @@ from fastapi import APIRouter, Body, Depends, HTTPException, Path, Query, Request from pydantic import BaseModel, Field, SecretStr -from backend.executor.manager import get_db_client from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler from backend.util.settings import Settings @@ -21,7 +20,7 @@ settings = Settings() router = APIRouter() -creds_manager = IntegrationCredentialsManager(db_manager=get_db_client()) +creds_manager = IntegrationCredentialsManager() class LoginResponse(BaseModel): diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py index edeb025803c6..f0d922c19686 100644 --- a/autogpt_platform/backend/backend/server/rest_api.py +++ b/autogpt_platform/backend/backend/server/rest_api.py @@ -7,6 +7,7 @@ import uvicorn from autogpt_libs.auth.middleware import auth_middleware +from autogpt_libs.utils.cache import thread_cached_property from fastapi import APIRouter, Body, Depends, FastAPI, HTTPException, Request from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import JSONResponse @@ -19,10 +20,7 @@ from backend.data.credit import get_block_costs, get_user_credit_model from backend.data.user import get_or_create_user from backend.executor import ExecutionManager, ExecutionScheduler -from backend.executor.manager import get_db_client -from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.server.model import CreateGraph, SetGraphActiveVersion -from backend.util.cache import thread_cached_property from backend.util.service import AppService, get_service_client from backend.util.settings import AppEnvironment, Config, Settings @@ -37,9 +35,13 @@ class AgentServer(AppService): _user_credit_model = get_user_credit_model() def __init__(self): - super().__init__(port=Config().agent_server_port) + super().__init__() self.use_redis = True + @classmethod + def get_port(cls) -> int: + return Config().agent_server_port + @asynccontextmanager async def lifespan(self, _: FastAPI): await db.connect() @@ -98,7 +100,6 @@ def run_service(self): tags=["integrations"], dependencies=[Depends(auth_middleware)], ) - self.integration_creds_manager = IntegrationCredentialsManager(get_db_client()) api_router.include_router( backend.server.routers.analytics.router, @@ -308,11 +309,11 @@ async def wrapper(*args, **kwargs): @thread_cached_property def execution_manager_client(self) -> ExecutionManager: - return get_service_client(ExecutionManager, Config().execution_manager_port) + return get_service_client(ExecutionManager) @thread_cached_property def execution_scheduler_client(self) -> ExecutionScheduler: - return 
get_service_client(ExecutionScheduler, Config().execution_scheduler_port) + return get_service_client(ExecutionScheduler) @classmethod def handle_internal_http_error(cls, request: Request, exc: Exception): diff --git a/autogpt_platform/backend/backend/util/service.py b/autogpt_platform/backend/backend/util/service.py index 3c4532b5ad00..e1f742483a64 100644 --- a/autogpt_platform/backend/backend/util/service.py +++ b/autogpt_platform/backend/backend/util/service.py @@ -5,6 +5,7 @@ import threading import time import typing +from abc import ABC, abstractmethod from enum import Enum from types import NoneType, UnionType from typing import ( @@ -99,16 +100,24 @@ def custom_dict_to_class(qualname, data: dict): return custom_dict_to_class -class AppService(AppProcess): +class AppService(AppProcess, ABC): shared_event_loop: asyncio.AbstractEventLoop use_db: bool = False use_redis: bool = False use_supabase: bool = False - def __init__(self, port): - self.port = port + def __init__(self): self.uri = None + @classmethod + @abstractmethod + def get_port(cls) -> int: + pass + + @classmethod + def get_host(cls) -> str: + return os.environ.get(f"{cls.service_name.upper()}_HOST", Config().pyro_host) + def run_service(self) -> None: while True: time.sleep(10) @@ -157,8 +166,7 @@ def cleanup(self): @conn_retry("Pyro", "Starting Pyro Service") def __start_pyro(self): - host = Config().pyro_host - daemon = Pyro5.api.Daemon(host=host, port=self.port) + daemon = Pyro5.api.Daemon(host=self.get_host(), port=self.get_port()) self.uri = daemon.register(self, objectId=self.service_name) logger.info(f"[{self.service_name}] Connected to Pyro; URI = {self.uri}") daemon.requestLoop() @@ -167,16 +175,20 @@ def __start_async_loop(self): self.shared_event_loop.run_forever() +# --------- UTILITIES --------- # + + AS = TypeVar("AS", bound=AppService) -def get_service_client(service_type: Type[AS], port: int) -> AS: +def get_service_client(service_type: Type[AS]) -> AS: service_name = service_type.service_name class DynamicClient: @conn_retry("Pyro", f"Connecting to [{service_name}]") def __init__(self): - host = os.environ.get(f"{service_name.upper()}_HOST", "localhost") + host = service_type.get_host() + port = service_type.get_port() uri = f"PYRO:{service_type.service_name}@{host}:{port}" logger.debug(f"Connecting to service [{service_name}]. 
URI = {uri}") self.proxy = Pyro5.api.Proxy(uri) @@ -191,8 +203,6 @@ def __getattr__(self, name: str) -> Callable[..., Any]: return cast(AS, DynamicClient()) -# --------- UTILITIES --------- # - builtin_types = [*vars(builtins).values(), NoneType, Enum] diff --git a/autogpt_platform/backend/test/executor/test_scheduler.py b/autogpt_platform/backend/test/executor/test_scheduler.py index c0bcc8307925..49e46510a120 100644 --- a/autogpt_platform/backend/test/executor/test_scheduler.py +++ b/autogpt_platform/backend/test/executor/test_scheduler.py @@ -5,7 +5,6 @@ from backend.server.model import CreateGraph from backend.usecases.sample import create_test_graph, create_test_user from backend.util.service import get_service_client -from backend.util.settings import Config from backend.util.test import SpinTestServer @@ -19,10 +18,7 @@ async def test_agent_schedule(server: SpinTestServer): user_id=test_user.id, ) - scheduler = get_service_client( - ExecutionScheduler, Config().execution_scheduler_port - ) - + scheduler = get_service_client(ExecutionScheduler) schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id) assert len(schedules) == 0 diff --git a/autogpt_platform/backend/test/util/test_service.py b/autogpt_platform/backend/test/util/test_service.py index e03063fff32d..a20810dbb1e5 100644 --- a/autogpt_platform/backend/test/util/test_service.py +++ b/autogpt_platform/backend/test/util/test_service.py @@ -7,7 +7,11 @@ class ServiceTest(AppService): def __init__(self): - super().__init__(port=TEST_SERVICE_PORT) + super().__init__() + + @classmethod + def get_port(cls) -> int: + return TEST_SERVICE_PORT @expose def add(self, a: int, b: int) -> int: @@ -28,7 +32,7 @@ async def add_async(a: int, b: int) -> int: @pytest.mark.asyncio(scope="session") async def test_service_creation(server): with ServiceTest(): - client = get_service_client(ServiceTest, TEST_SERVICE_PORT) + client = get_service_client(ServiceTest) assert client.add(5, 3) == 8 assert client.subtract(10, 4) == 6 assert client.fun_with_async(5, 3) == 8 From 27c9ec5bcdad25122367e0f5fcbf46b4b12ee9ce Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Wed, 23 Oct 2024 06:39:15 +0300 Subject: [PATCH 20/24] fix(platform): Enable auth on local-backend mode by default (#8405) --- autogpt_platform/backend/.env.example | 4 ++-- autogpt_platform/backend/README.md | 5 ++-- autogpt_platform/backend/backend/data/user.py | 24 +++++++++---------- .../backend/backend/server/ws_api.py | 4 ++-- .../backend/backend/util/settings.py | 6 ++--- autogpt_platform/backend/backend/util/test.py | 2 +- autogpt_platform/backend/test/__init__.py | 3 +++ autogpt_platform/docker-compose.yml | 21 ++++++++++++++++ .../src/lib/autogpt-server-api/baseClient.ts | 2 +- 9 files changed, 47 insertions(+), 24 deletions(-) diff --git a/autogpt_platform/backend/.env.example b/autogpt_platform/backend/.env.example index 0cb4d7abd723..0ec84ca83e66 100644 --- a/autogpt_platform/backend/.env.example +++ b/autogpt_platform/backend/.env.example @@ -20,13 +20,13 @@ PYRO_HOST=localhost SENTRY_DSN= ## User auth with Supabase is required for any of the 3rd party integrations with auth to work. 
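## NOTE (illustrative, not part of the patch): the settings.py hunk further
## down also retypes enable_auth from str to a pydantic bool, so env values
## such as "true", "True" or "1" coerce to True and the scattered
## `.lower() == "true"` string checks (see the ws_api.py and user.py hunks
## below) go away.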
-ENABLE_AUTH=false +ENABLE_AUTH=true SUPABASE_URL=http://localhost:8000 SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long # For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow for integrations to work. -# FRONTEND_BASE_URL=http://localhost:3000 +FRONTEND_BASE_URL=http://localhost:3000 ## == INTEGRATION CREDENTIALS == ## # Each set of server side credentials is required for the corresponding 3rd party diff --git a/autogpt_platform/backend/README.md b/autogpt_platform/backend/README.md index 00194b304e1c..ab91027df23a 100644 --- a/autogpt_platform/backend/README.md +++ b/autogpt_platform/backend/README.md @@ -58,7 +58,7 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes 6. Migrate the database. Be careful because this deletes current data in the database. ```sh - docker compose up db redis -d + docker compose up db -d poetry run prisma migrate deploy ``` @@ -66,9 +66,10 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes ### Starting the server without Docker -Run the following command to build the dockerfiles: +Run the following command to run database in docker but the application locally: ```sh +docker compose --profile local up deps --build --detach poetry run app ``` diff --git a/autogpt_platform/backend/backend/data/user.py b/autogpt_platform/backend/backend/data/user.py index 6c8696379e37..477b3bae6526 100644 --- a/autogpt_platform/backend/backend/data/user.py +++ b/autogpt_platform/backend/backend/data/user.py @@ -37,19 +37,17 @@ async def get_user_by_id(user_id: str) -> Optional[User]: return User.model_validate(user) if user else None -async def create_default_user(enable_auth: str) -> Optional[User]: - if not enable_auth.lower() == "true": - user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID}) - if not user: - user = await prisma.user.create( - data={ - "id": DEFAULT_USER_ID, - "email": "default@example.com", - "name": "Default User", - } - ) - return User.model_validate(user) - return None +async def create_default_user() -> Optional[User]: + user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID}) + if not user: + user = await prisma.user.create( + data={ + "id": DEFAULT_USER_ID, + "email": "default@example.com", + "name": "Default User", + } + ) + return User.model_validate(user) async def get_user_metadata(user_id: str) -> UserMetadataRaw: diff --git a/autogpt_platform/backend/backend/server/ws_api.py b/autogpt_platform/backend/backend/server/ws_api.py index 5e35693be444..8cbda22e1ed1 100644 --- a/autogpt_platform/backend/backend/server/ws_api.py +++ b/autogpt_platform/backend/backend/server/ws_api.py @@ -28,7 +28,7 @@ async def lifespan(app: FastAPI): docs_url = "/docs" if settings.config.app_env == AppEnvironment.LOCAL else None -app = FastAPI(lifespan=lifespan) +app = FastAPI(lifespan=lifespan, docs_url=docs_url) _connection_manager = None logger.info(f"CORS allow origins: {settings.config.backend_cors_allow_origins}") @@ -66,7 +66,7 @@ async def event_broadcaster(manager: ConnectionManager): async def authenticate_websocket(websocket: WebSocket) -> str: - if settings.config.enable_auth.lower() == "true": + if settings.config.enable_auth: token = websocket.query_params.get("token") 
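# NOTE (illustrative, not part of the patch): with auth enabled, clients
# append the Supabase JWT as a query parameter when opening the socket, e.g.
# the hypothetical local URL ws://localhost:8001/ws?token=<supabase-jwt>; the
# check below closes the connection with code 4001 when the token is missing.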
if not token: await websocket.close(code=4001, reason="Missing authentication token") diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index 1ac875fdf92a..f521bf104f7d 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -69,8 +69,8 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): default="localhost", description="The default hostname of the Pyro server.", ) - enable_auth: str = Field( - default="false", + enable_auth: bool = Field( + default=True, description="If authentication is enabled or not", ) enable_credit: str = Field( @@ -133,7 +133,7 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): ) frontend_base_url: str = Field( - default="", + default="http://localhost:3000", description="Can be used to explicitly set the base URL for the frontend. " "This value is then used to generate redirect URLs for OAuth flows.", ) diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py index 704d6507e081..d1e2d83f7f3f 100644 --- a/autogpt_platform/backend/backend/util/test.py +++ b/autogpt_platform/backend/backend/util/test.py @@ -31,7 +31,7 @@ async def __aenter__(self): await db.connect() await initialize_blocks() - await create_default_user("false") + await create_default_user() return self diff --git a/autogpt_platform/backend/test/__init__.py b/autogpt_platform/backend/test/__init__.py index e69de29bb2d1..d10438719da5 100644 --- a/autogpt_platform/backend/test/__init__.py +++ b/autogpt_platform/backend/test/__init__.py @@ -0,0 +1,3 @@ +import os + +os.environ["ENABLE_AUTH"] = "false" diff --git a/autogpt_platform/docker-compose.yml b/autogpt_platform/docker-compose.yml index be6f1f49ede5..a1ae16b3ea63 100644 --- a/autogpt_platform/docker-compose.yml +++ b/autogpt_platform/docker-compose.yml @@ -142,3 +142,24 @@ services: extends: file: ./supabase/docker/docker-compose.yml service: vector + + deps: + <<: *supabase-services + profiles: + - local + image: busybox + command: /bin/true + depends_on: + - studio + - kong + - auth + - rest + - realtime + - storage + - imgproxy + - meta + - functions + - analytics + - db + - vector + - redis diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/baseClient.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/baseClient.ts index dc80bb0401d4..60f2a33369ab 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/baseClient.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/baseClient.ts @@ -273,10 +273,10 @@ export default class BaseAutoGPTServerAPI { if ( response.status === 403 && - response.statusText === "Not authenticated" && typeof window !== "undefined" // Check if in browser environment ) { window.location.href = "/login"; + return null; } let errorDetail; From 2715b81ff55d88a000987444b76dba1a6de8f666 Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Wed, 23 Oct 2024 09:21:12 +0100 Subject: [PATCH 21/24] feat(platform/ci): Run migrations in ci for dev (#8395) * run migrations in ci * update environment * temp false * add dev migrations * remove code change step --- .../workflows/platform-autogpt-deploy.yaml | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/.github/workflows/platform-autogpt-deploy.yaml b/.github/workflows/platform-autogpt-deploy.yaml index 97c2fe78749e..549eb0478115 100644 --- a/.github/workflows/platform-autogpt-deploy.yaml 
+++ b/.github/workflows/platform-autogpt-deploy.yaml @@ -19,8 +19,42 @@ env: NAMESPACE: dev-agpt jobs: + migrate: + environment: develop + name: Run migrations for AutoGPT Platform + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install prisma + + - name: Run Backend Migrations + working-directory: ./autogpt_platform/backend + run: | + python -m prisma migrate deploy + env: + DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }} + + - name: Run Market Migrations + working-directory: ./autogpt_platform/market + run: | + python -m prisma migrate deploy + env: + DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }} + build-push-deploy: name: Build, Push, and Deploy + needs: migrate runs-on: ubuntu-latest steps: From 37607d104c98ab74c47af88da36ee92fb8a56f67 Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Wed, 23 Oct 2024 09:22:45 +0100 Subject: [PATCH 22/24] feat(platform/infra): Create prod service account and pool (#8383) * ci with workload identity * temp update * update name * wip * update auth step * update provider name * remove audience * temp set to false * update registry naming * update context * update login * revert temp updates * add prod iam and pool --- .../infra/terraform/environments/prod.tfvars | 44 +++++++++++++++++-- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/autogpt_platform/infra/terraform/environments/prod.tfvars b/autogpt_platform/infra/terraform/environments/prod.tfvars index e9351389285a..4bceda49957a 100644 --- a/autogpt_platform/infra/terraform/environments/prod.tfvars +++ b/autogpt_platform/infra/terraform/environments/prod.tfvars @@ -28,6 +28,11 @@ service_accounts = { "prod-agpt-market-sa" = { display_name = "AutoGPT prod Market backend Account" description = "Service account for agpt prod market backend" + }, + "prod-github-actions-workload-identity" = { + service_account_name = "prod-github-actions-sa" + namespace = "prod-agpt" + ksa_name = "prod-github-actions-sa" } } @@ -59,7 +64,8 @@ role_bindings = { "serviceAccount:prod-agpt-backend-sa@agpt-prod.iam.gserviceaccount.com", "serviceAccount:prod-agpt-frontend-sa@agpt-prod.iam.gserviceaccount.com", "serviceAccount:prod-agpt-ws-backend-sa@agpt-prod.iam.gserviceaccount.com", - "serviceAccount:prod-agpt-market-sa@agpt-prod.iam.gserviceaccount.com" + "serviceAccount:prod-agpt-market-sa@agpt-prod.iam.gserviceaccount.com", + "serviceAccount:prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com" ], "roles/cloudsql.client" = [ "serviceAccount:prod-agpt-backend-sa@agpt-prod.iam.gserviceaccount.com", @@ -80,7 +86,8 @@ role_bindings = { "serviceAccount:prod-agpt-backend-sa@agpt-prod.iam.gserviceaccount.com", "serviceAccount:prod-agpt-frontend-sa@agpt-prod.iam.gserviceaccount.com", "serviceAccount:prod-agpt-ws-backend-sa@agpt-prod.iam.gserviceaccount.com", - "serviceAccount:prod-agpt-market-sa@agpt-prod.iam.gserviceaccount.com" + "serviceAccount:prod-agpt-market-sa@agpt-prod.iam.gserviceaccount.com", + "serviceAccount:prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com" ] "roles/compute.networkUser" = [ "serviceAccount:prod-agpt-backend-sa@agpt-prod.iam.gserviceaccount.com", @@ -93,6 +100,16 @@ role_bindings = { "serviceAccount:prod-agpt-frontend-sa@agpt-prod.iam.gserviceaccount.com", 
"serviceAccount:prod-agpt-ws-backend-sa@agpt-prod.iam.gserviceaccount.com", "serviceAccount:prod-agpt-market-sa@agpt-prod.iam.gserviceaccount.com" + ], + "roles/artifactregistry.writer" = [ + "serviceAccount:prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com" + ], + "roles/container.viewer" = [ + "serviceAccount:prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com" + ], + "roles/iam.serviceAccountTokenCreator" = [ + "principalSet://iam.googleapis.com/projects/638488734936/locations/global/workloadIdentityPools/prod-pool/*", + "serviceAccount:prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com" ] } @@ -101,4 +118,25 @@ services_ip_cidr_range = "10.2.0.0/20" public_bucket_names = ["website-artifacts"] standard_bucket_names = [] -bucket_admins = ["gcp-devops-agpt@agpt.co", "gcp-developers@agpt.co"] \ No newline at end of file +bucket_admins = ["gcp-devops-agpt@agpt.co", "gcp-developers@agpt.co"] + +workload_identity_pools = { + "dev-pool" = { + display_name = "Production Identity Pool" + providers = { + "github" = { + issuer_uri = "https://token.actions.githubusercontent.com" + attribute_mapping = { + "google.subject" = "assertion.sub" + "attribute.repository" = "assertion.repository" + "attribute.repository_owner" = "assertion.repository_owner" + } + } + } + service_accounts = { + "prod-github-actions-sa" = [ + "Significant-Gravitas/AutoGPT" + ] + } + } +} \ No newline at end of file From 404d0638defbb3974ee28dba700988babc07c079 Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Wed, 23 Oct 2024 09:23:54 +0100 Subject: [PATCH 23/24] feat(platform/ci) Add workflow for deploying to production (#8384) * ci with workload identity * temp update * update name * wip * update auth step * update provider name * remove audience * temp set to false * update registry naming * update context * update login * revert temp updates * add prod iam and pool * add release deploy with approval * use gha default approval behaviour * add back in release trigger * add new line --- .../workflows/platform-autgpt-deploy-prod.yml | 149 ++++++++++++++++++ 1 file changed, 149 insertions(+) create mode 100644 .github/workflows/platform-autgpt-deploy-prod.yml diff --git a/.github/workflows/platform-autgpt-deploy-prod.yml b/.github/workflows/platform-autgpt-deploy-prod.yml new file mode 100644 index 000000000000..b0d94526ed10 --- /dev/null +++ b/.github/workflows/platform-autgpt-deploy-prod.yml @@ -0,0 +1,149 @@ +name: AutoGPT Platform - Build, Push, and Deploy Prod Environment + +on: + release: + types: [published] + +permissions: + contents: 'read' + id-token: 'write' + +env: + PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }} + GKE_CLUSTER: prod-gke-cluster + GKE_ZONE: us-central1-a + NAMESPACE: prod-agpt + +jobs: + build-push-deploy: + environment: production + name: Build, Push, and Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - id: 'auth' + uses: 'google-github-actions/auth@v1' + with: + workload_identity_provider: 'projects/638488734936/locations/global/workloadIdentityPools/prod-pool/providers/github' + service_account: 'prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com' + token_format: 'access_token' + create_credentials_file: true + + - name: 'Set up Cloud SDK' + uses: 'google-github-actions/setup-gcloud@v1' + + - name: 'Configure Docker' + run: | + gcloud auth configure-docker us-east1-docker.pkg.dev + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: 
Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + + - name: Check for changes + id: check_changes + run: | + git fetch origin master + BACKEND_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/backend/" && echo "true" || echo "false") + FRONTEND_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/frontend/" && echo "true" || echo "false") + MARKET_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/market/" && echo "true" || echo "false") + echo "backend_changed=$BACKEND_CHANGED" >> $GITHUB_OUTPUT + echo "frontend_changed=$FRONTEND_CHANGED" >> $GITHUB_OUTPUT + echo "market_changed=$MARKET_CHANGED" >> $GITHUB_OUTPUT + + - name: Get GKE credentials + uses: 'google-github-actions/get-gke-credentials@v1' + with: + cluster_name: ${{ env.GKE_CLUSTER }} + location: ${{ env.GKE_ZONE }} + + - name: Build and Push Backend + if: steps.check_changes.outputs.backend_changed == 'true' + uses: docker/build-push-action@v2 + with: + context: . + file: ./autogpt_platform/backend/Dockerfile + push: true + tags: us-east1-docker.pkg.dev/agpt-prod/agpt-backend-prod/agpt-backend-prod:${{ github.sha }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Build and Push Frontend + if: steps.check_changes.outputs.frontend_changed == 'true' + uses: docker/build-push-action@v2 + with: + context: . + file: ./autogpt_platform/frontend/Dockerfile + push: true + tags: us-east1-docker.pkg.dev/agpt-prod/agpt-frontend-prod/agpt-frontend-prod:${{ github.sha }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Build and Push Market + if: steps.check_changes.outputs.market_changed == 'true' + uses: docker/build-push-action@v2 + with: + context: . 
+ file: ./autogpt_platform/market/Dockerfile + push: true + tags: us-east1-docker.pkg.dev/agpt-prod/agpt-market-prod/agpt-market-prod:${{ github.sha }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Move cache + run: | + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache + + - name: Set up Helm + uses: azure/setup-helm@v1 + with: + version: v3.4.0 + + - name: Deploy Backend + if: steps.check_changes.outputs.backend_changed == 'true' + run: | + helm upgrade autogpt-server ./autogpt-server \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-server/values.yaml \ + -f autogpt-server/values.prod.yaml \ + --set image.tag=${{ github.sha }} + + - name: Deploy Websocket + if: steps.check_changes.outputs.backend_changed == 'true' + run: | + helm upgrade autogpt-websocket-server ./autogpt-websocket-server \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-websocket-server/values.yaml \ + -f autogpt-websocket-server/values.prod.yaml \ + --set image.tag=${{ github.sha }} + + - name: Deploy Market + if: steps.check_changes.outputs.market_changed == 'true' + run: | + helm upgrade autogpt-market ./autogpt-market \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-market/values.yaml \ + -f autogpt-market/values.prod.yaml \ + --set image.tag=${{ github.sha }} + + - name: Deploy Frontend + if: steps.check_changes.outputs.frontend_changed == 'true' + run: | + helm upgrade autogpt-builder ./autogpt-builder \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-builder/values.yaml \ + -f autogpt-builder/values.prod.yaml \ + --set image.tag=${{ github.sha }} From d9b8e0d273a64422465e1d55ac493893695b4ac2 Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Wed, 23 Oct 2024 09:25:33 +0100 Subject: [PATCH 24/24] feat(platform/ci): Add prod migrations (#8396) * ci with workload identity * temp update * update name * wip * update auth step * update provider name * remove audience * temp set to false * update registry naming * update context * update login * revert temp updates * add prod iam and pool * add release deploy with approval * use gha default approval behaviour * add back in release trigger * add new line * add prod migrations * prod migrations without check --- .../workflows/platform-autgpt-deploy-prod.yml | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/.github/workflows/platform-autgpt-deploy-prod.yml b/.github/workflows/platform-autgpt-deploy-prod.yml index b0d94526ed10..97013715db4d 100644 --- a/.github/workflows/platform-autgpt-deploy-prod.yml +++ b/.github/workflows/platform-autgpt-deploy-prod.yml @@ -15,6 +15,39 @@ env: NAMESPACE: prod-agpt jobs: + migrate: + environment: production + name: Run migrations for AutoGPT Platform + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install prisma + + - name: Run Backend Migrations + working-directory: ./autogpt_platform/backend + run: | + python -m prisma migrate deploy + env: + DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }} + + - name: Run Market Migrations + working-directory: ./autogpt_platform/market + run: | + python -m prisma migrate deploy + env: + DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }} + build-push-deploy: environment: production name: Build, Push, and Deploy