diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 98a69ae40b85..2895f601baf8 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -21,3 +21,16 @@ Here is a list of our critical paths, if you need some inspiration on what and h
 - Upload agent to marketplace
 - Import an agent from marketplace and confirm it executes correctly
 - Edit an agent from monitor, and confirm it executes correctly
+
+### Configuration Changes 📝
+> [!NOTE]
+> Only for the new autogpt platform, currently in autogpt_platform/
+
+If you're making configuration or infrastructure changes, please check that you've updated the related infrastructure code in the autogpt_platform/infra folder.
+
+Examples of such changes might include:
+
+- Changing ports
+- Adding new services that need to communicate with each other
+- Secrets or environment variable changes
+- New infrastructure, such as databases
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 000000000000..68b6fc2b7c27
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,179 @@
+version: 2
+updates:
+  # autogpt_libs (Poetry project)
+  - package-ecosystem: "pip"
+    directory: "autogpt_platform/autogpt_libs"
+    schedule:
+      interval: "weekly"
+    open-pull-requests-limit: 10
+    target-branch: "dev"
+    groups:
+      production-dependencies:
+        dependency-type: "production"
+        update-types:
+          - "minor"
+          - "patch"
+      development-dependencies:
+        dependency-type: "development"
+        update-types:
+          - "minor"
+          - "patch"
+
+  # backend (Poetry project)
+  - package-ecosystem: "pip"
+    directory: "autogpt_platform/backend"
+    schedule:
+      interval: "weekly"
+    open-pull-requests-limit: 10
+    target-branch: "dev"
+    groups:
+      production-dependencies:
+        dependency-type: "production"
+        update-types:
+          - "minor"
+          - "patch"
+      development-dependencies:
+        dependency-type: "development"
+        update-types:
+          - "minor"
+          - "patch"
+
+
+  # frontend (Next.js project)
+  - package-ecosystem: "npm"
+    directory: "autogpt_platform/frontend"
+    schedule:
+      interval: "weekly"
+    open-pull-requests-limit: 10
+    target-branch: "dev"
+    groups:
+      production-dependencies:
+        dependency-type: "production"
+        update-types:
+          - "minor"
+          - "patch"
+      development-dependencies:
+        dependency-type: "development"
+        update-types:
+          - "minor"
+          - "patch"
+
+
+  # infra (Terraform)
+  - package-ecosystem: "terraform"
+    directory: "autogpt_platform/infra"
+    schedule:
+      interval: "weekly"
+    open-pull-requests-limit: 5
+    target-branch: "dev"
+    groups:
+      production-dependencies:
+        dependency-type: "production"
+        update-types:
+          - "minor"
+          - "patch"
+      development-dependencies:
+        dependency-type: "development"
+        update-types:
+          - "minor"
+          - "patch"
+
+
+  # market (Poetry project)
+  - package-ecosystem: "pip"
+    directory: "autogpt_platform/market"
+    schedule:
+      interval: "weekly"
+    open-pull-requests-limit: 10
+    target-branch: "dev"
+    groups:
+      production-dependencies:
+        dependency-type: "production"
+        update-types:
+          - "minor"
+          - "patch"
+      development-dependencies:
+        dependency-type: "development"
+        update-types:
+          - "minor"
+          - "patch"
+
+
+  # GitHub Actions
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+    open-pull-requests-limit: 5
+    target-branch: "dev"
+    groups:
+      production-dependencies:
+        dependency-type: "production"
+        update-types:
+          - "minor"
+          - "patch"
+      development-dependencies:
+        dependency-type: "development"
+        update-types:
+          - "minor"
+          - "patch"
+
+
+  # Docker
+  - 
package-ecosystem: "docker" + directory: "autogpt_platform/" + schedule: + interval: "weekly" + open-pull-requests-limit: 5 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + + # Submodules + - package-ecosystem: "gitsubmodule" + directory: "autogpt_platform/supabase" + schedule: + interval: "weekly" + open-pull-requests-limit: 1 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + + # Docs + - package-ecosystem: 'pip' + directory: "docs/" + schedule: + interval: "weekly" + open-pull-requests-limit: 1 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" diff --git a/.github/workflows/platform-autgpt-deploy-prod.yml b/.github/workflows/platform-autgpt-deploy-prod.yml new file mode 100644 index 000000000000..97013715db4d --- /dev/null +++ b/.github/workflows/platform-autgpt-deploy-prod.yml @@ -0,0 +1,182 @@ +name: AutoGPT Platform - Build, Push, and Deploy Prod Environment + +on: + release: + types: [published] + +permissions: + contents: 'read' + id-token: 'write' + +env: + PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }} + GKE_CLUSTER: prod-gke-cluster + GKE_ZONE: us-central1-a + NAMESPACE: prod-agpt + +jobs: + migrate: + environment: production + name: Run migrations for AutoGPT Platform + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install prisma + + - name: Run Backend Migrations + working-directory: ./autogpt_platform/backend + run: | + python -m prisma migrate deploy + env: + DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }} + + - name: Run Market Migrations + working-directory: ./autogpt_platform/market + run: | + python -m prisma migrate deploy + env: + DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }} + + build-push-deploy: + environment: production + name: Build, Push, and Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - id: 'auth' + uses: 'google-github-actions/auth@v1' + with: + workload_identity_provider: 'projects/638488734936/locations/global/workloadIdentityPools/prod-pool/providers/github' + service_account: 'prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com' + token_format: 'access_token' + create_credentials_file: true + + - name: 'Set up Cloud SDK' + uses: 'google-github-actions/setup-gcloud@v1' + + - name: 'Configure Docker' + run: | + gcloud auth configure-docker us-east1-docker.pkg.dev + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + + - name: Check for changes + id: check_changes + run: | + git fetch origin master + BACKEND_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/backend/" && echo "true" || 
echo "false") + FRONTEND_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/frontend/" && echo "true" || echo "false") + MARKET_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/market/" && echo "true" || echo "false") + echo "backend_changed=$BACKEND_CHANGED" >> $GITHUB_OUTPUT + echo "frontend_changed=$FRONTEND_CHANGED" >> $GITHUB_OUTPUT + echo "market_changed=$MARKET_CHANGED" >> $GITHUB_OUTPUT + + - name: Get GKE credentials + uses: 'google-github-actions/get-gke-credentials@v1' + with: + cluster_name: ${{ env.GKE_CLUSTER }} + location: ${{ env.GKE_ZONE }} + + - name: Build and Push Backend + if: steps.check_changes.outputs.backend_changed == 'true' + uses: docker/build-push-action@v2 + with: + context: . + file: ./autogpt_platform/backend/Dockerfile + push: true + tags: us-east1-docker.pkg.dev/agpt-prod/agpt-backend-prod/agpt-backend-prod:${{ github.sha }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Build and Push Frontend + if: steps.check_changes.outputs.frontend_changed == 'true' + uses: docker/build-push-action@v2 + with: + context: . + file: ./autogpt_platform/frontend/Dockerfile + push: true + tags: us-east1-docker.pkg.dev/agpt-prod/agpt-frontend-prod/agpt-frontend-prod:${{ github.sha }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Build and Push Market + if: steps.check_changes.outputs.market_changed == 'true' + uses: docker/build-push-action@v2 + with: + context: . + file: ./autogpt_platform/market/Dockerfile + push: true + tags: us-east1-docker.pkg.dev/agpt-prod/agpt-market-prod/agpt-market-prod:${{ github.sha }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Move cache + run: | + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache + + - name: Set up Helm + uses: azure/setup-helm@v1 + with: + version: v3.4.0 + + - name: Deploy Backend + if: steps.check_changes.outputs.backend_changed == 'true' + run: | + helm upgrade autogpt-server ./autogpt-server \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-server/values.yaml \ + -f autogpt-server/values.prod.yaml \ + --set image.tag=${{ github.sha }} + + - name: Deploy Websocket + if: steps.check_changes.outputs.backend_changed == 'true' + run: | + helm upgrade autogpt-websocket-server ./autogpt-websocket-server \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-websocket-server/values.yaml \ + -f autogpt-websocket-server/values.prod.yaml \ + --set image.tag=${{ github.sha }} + + - name: Deploy Market + if: steps.check_changes.outputs.market_changed == 'true' + run: | + helm upgrade autogpt-market ./autogpt-market \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-market/values.yaml \ + -f autogpt-market/values.prod.yaml \ + --set image.tag=${{ github.sha }} + + - name: Deploy Frontend + if: steps.check_changes.outputs.frontend_changed == 'true' + run: | + helm upgrade autogpt-builder ./autogpt-builder \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-builder/values.yaml \ + -f autogpt-builder/values.prod.yaml \ + --set image.tag=${{ github.sha }} diff --git a/.github/workflows/platform-autogpt-deploy.yaml b/.github/workflows/platform-autogpt-deploy.yaml new file mode 100644 index 000000000000..549eb0478115 --- /dev/null +++ b/.github/workflows/platform-autogpt-deploy.yaml @@ -0,0 +1,186 @@ +name: AutoGPT Platform - Build, Push, and Deploy 
Dev Environment + +on: + push: + branches: [ dev ] + paths: + - 'autogpt_platform/backend/**' + - 'autogpt_platform/frontend/**' + - 'autogpt_platform/market/**' + +permissions: + contents: 'read' + id-token: 'write' + +env: + PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }} + GKE_CLUSTER: dev-gke-cluster + GKE_ZONE: us-central1-a + NAMESPACE: dev-agpt + +jobs: + migrate: + environment: develop + name: Run migrations for AutoGPT Platform + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install prisma + + - name: Run Backend Migrations + working-directory: ./autogpt_platform/backend + run: | + python -m prisma migrate deploy + env: + DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }} + + - name: Run Market Migrations + working-directory: ./autogpt_platform/market + run: | + python -m prisma migrate deploy + env: + DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }} + + build-push-deploy: + name: Build, Push, and Deploy + needs: migrate + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - id: 'auth' + uses: 'google-github-actions/auth@v1' + with: + workload_identity_provider: 'projects/638488734936/locations/global/workloadIdentityPools/dev-pool/providers/github' + service_account: 'dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com' + token_format: 'access_token' + create_credentials_file: true + + - name: 'Set up Cloud SDK' + uses: 'google-github-actions/setup-gcloud@v1' + + - name: 'Configure Docker' + run: | + gcloud auth configure-docker us-east1-docker.pkg.dev + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + + - name: Check for changes + id: check_changes + run: | + git fetch origin dev + BACKEND_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/backend/" && echo "true" || echo "false") + FRONTEND_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/frontend/" && echo "true" || echo "false") + MARKET_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/market/" && echo "true" || echo "false") + echo "backend_changed=$BACKEND_CHANGED" >> $GITHUB_OUTPUT + echo "frontend_changed=$FRONTEND_CHANGED" >> $GITHUB_OUTPUT + echo "market_changed=$MARKET_CHANGED" >> $GITHUB_OUTPUT + + - name: Get GKE credentials + uses: 'google-github-actions/get-gke-credentials@v1' + with: + cluster_name: ${{ env.GKE_CLUSTER }} + location: ${{ env.GKE_ZONE }} + + - name: Build and Push Backend + if: steps.check_changes.outputs.backend_changed == 'true' + uses: docker/build-push-action@v2 + with: + context: . + file: ./autogpt_platform/backend/Dockerfile + push: true + tags: us-east1-docker.pkg.dev/agpt-dev/agpt-backend-dev/agpt-backend-dev:${{ github.sha }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Build and Push Frontend + if: steps.check_changes.outputs.frontend_changed == 'true' + uses: docker/build-push-action@v2 + with: + context: . 
+ file: ./autogpt_platform/frontend/Dockerfile + push: true + tags: us-east1-docker.pkg.dev/agpt-dev/agpt-frontend-dev/agpt-frontend-dev:${{ github.sha }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Build and Push Market + if: steps.check_changes.outputs.market_changed == 'true' + uses: docker/build-push-action@v2 + with: + context: . + file: ./autogpt_platform/market/Dockerfile + push: true + tags: us-east1-docker.pkg.dev/agpt-dev/agpt-market-dev/agpt-market-dev:${{ github.sha }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Move cache + run: | + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache + + - name: Set up Helm + uses: azure/setup-helm@v1 + with: + version: v3.4.0 + + - name: Deploy Backend + if: steps.check_changes.outputs.backend_changed == 'true' + run: | + helm upgrade autogpt-server ./autogpt-server \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-server/values.yaml \ + -f autogpt-server/values.dev.yaml \ + --set image.tag=${{ github.sha }} + + - name: Deploy Websocket + if: steps.check_changes.outputs.backend_changed == 'true' + run: | + helm upgrade autogpt-websocket-server ./autogpt-websocket-server \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-websocket-server/values.yaml \ + -f autogpt-websocket-server/values.dev.yaml \ + --set image.tag=${{ github.sha }} + + - name: Deploy Market + if: steps.check_changes.outputs.market_changed == 'true' + run: | + helm upgrade autogpt-market ./autogpt-market \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-market/values.yaml \ + -f autogpt-market/values.dev.yaml \ + --set image.tag=${{ github.sha }} + + - name: Deploy Frontend + if: steps.check_changes.outputs.frontend_changed == 'true' + run: | + helm upgrade autogpt-builder ./autogpt-builder \ + --namespace ${{ env.NAMESPACE }} \ + -f autogpt-builder/values.yaml \ + -f autogpt-builder/values.dev.yaml \ + --set image.tag=${{ github.sha }} \ No newline at end of file diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index 4262cd6484ad..ce3633013bb9 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -39,10 +39,27 @@ jobs: runs-on: ubuntu-latest steps: + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + # this might remove tools that are actually needed, + # if set to "true" but frees about 6 GB + tool-cache: false + + # all of these default to true, but feel free to set to + # "false" if necessary for your workflow + android: false + dotnet: false + haskell: false + large-packages: true + docker-images: true + swap-storage: true + - name: Checkout repository uses: actions/checkout@v4 with: submodules: recursive + - name: Set up Node.js uses: actions/setup-node@v4 with: diff --git a/.github/workflows/repo-pr-enforce-base-branch.yml b/.github/workflows/repo-pr-enforce-base-branch.yml new file mode 100644 index 000000000000..3d4bd9096a52 --- /dev/null +++ b/.github/workflows/repo-pr-enforce-base-branch.yml @@ -0,0 +1,21 @@ +name: Repo - Enforce dev as base branch +on: + pull_request_target: + branches: [ master ] + types: [ opened ] + +jobs: + check_pr_target: + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Check if PR is from dev or hotfix + if: ${{ !(startsWith(github.event.pull_request.head.ref, 'hotfix/') || 
github.event.pull_request.head.ref == 'dev') }}
+        run: |
+          gh pr comment ${{ github.event.number }} --repo "$REPO" \
+            --body $'This PR targets the `master` branch but does not come from `dev` or a `hotfix/*` branch.\n\nAutomatically setting the base branch to `dev`.'
+          gh pr edit ${{ github.event.number }} --base dev --repo "$REPO"
+        env:
+          GITHUB_TOKEN: ${{ github.token }}
+          REPO: ${{ github.repository }}
diff --git a/README.md b/README.md
index 2371fa39f8d0..722da4223019 100644
--- a/README.md
+++ b/README.md
@@ -158,6 +158,8 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
 ---
 
+## Star stats
+[Star History Chart]
@@ -167,3 +169,10 @@ To maintain a uniform standard and ensure seamless compatibility with many curre

+ + +## ⚡ Contributors + + + Contributors + diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py index 44cc9b60f4a1..787683623313 100644 --- a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py +++ b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py @@ -1,11 +1,12 @@ import secrets from datetime import datetime, timedelta, timezone -from typing import TYPE_CHECKING, cast +from typing import TYPE_CHECKING if TYPE_CHECKING: from redis import Redis - from supabase import Client + from backend.executor.database import DatabaseManager +from autogpt_libs.utils.cache import thread_cached_property from autogpt_libs.utils.synchronize import RedisKeyedMutex from .types import ( @@ -18,9 +19,14 @@ class SupabaseIntegrationCredentialsStore: - def __init__(self, supabase: "Client", redis: "Redis"): - self.supabase = supabase + def __init__(self, redis: "Redis"): self.locks = RedisKeyedMutex(redis) + + @thread_cached_property + def db_manager(self) -> "DatabaseManager": + from backend.executor.database import DatabaseManager + from backend.util.service import get_service_client + return get_service_client(DatabaseManager) def add_creds(self, user_id: str, credentials: Credentials) -> None: with self.locked_user_metadata(user_id): @@ -35,7 +41,9 @@ def add_creds(self, user_id: str, credentials: Credentials) -> None: def get_all_creds(self, user_id: str) -> list[Credentials]: user_metadata = self._get_user_metadata(user_id) - return UserMetadata.model_validate(user_metadata).integration_credentials + return UserMetadata.model_validate( + user_metadata.model_dump() + ).integration_credentials def get_creds_by_id(self, user_id: str, credentials_id: str) -> Credentials | None: all_credentials = self.get_all_creds(user_id) @@ -90,9 +98,7 @@ def delete_creds_by_id(self, user_id: str, credentials_id: str) -> None: ] self._set_user_integration_creds(user_id, filtered_credentials) - async def store_state_token( - self, user_id: str, provider: str, scopes: list[str] - ) -> str: + def store_state_token(self, user_id: str, provider: str, scopes: list[str]) -> str: token = secrets.token_urlsafe(32) expires_at = datetime.now(timezone.utc) + timedelta(minutes=10) @@ -105,17 +111,17 @@ async def store_state_token( with self.locked_user_metadata(user_id): user_metadata = self._get_user_metadata(user_id) - oauth_states = user_metadata.get("integration_oauth_states", []) + oauth_states = user_metadata.integration_oauth_states oauth_states.append(state.model_dump()) - user_metadata["integration_oauth_states"] = oauth_states + user_metadata.integration_oauth_states = oauth_states - self.supabase.auth.admin.update_user_by_id( - user_id, {"user_metadata": user_metadata} + self.db_manager.update_user_metadata( + user_id=user_id, metadata=user_metadata ) return token - async def get_any_valid_scopes_from_state_token( + def get_any_valid_scopes_from_state_token( self, user_id: str, token: str, provider: str ) -> list[str]: """ @@ -126,7 +132,7 @@ async def get_any_valid_scopes_from_state_token( THE CODE FOR TOKENS. 
""" user_metadata = self._get_user_metadata(user_id) - oauth_states = user_metadata.get("integration_oauth_states", []) + oauth_states = user_metadata.integration_oauth_states now = datetime.now(timezone.utc) valid_state = next( @@ -145,10 +151,10 @@ async def get_any_valid_scopes_from_state_token( return [] - async def verify_state_token(self, user_id: str, token: str, provider: str) -> bool: + def verify_state_token(self, user_id: str, token: str, provider: str) -> bool: with self.locked_user_metadata(user_id): user_metadata = self._get_user_metadata(user_id) - oauth_states = user_metadata.get("integration_oauth_states", []) + oauth_states = user_metadata.integration_oauth_states now = datetime.now(timezone.utc) valid_state = next( @@ -165,10 +171,8 @@ async def verify_state_token(self, user_id: str, token: str, provider: str) -> b if valid_state: # Remove the used state oauth_states.remove(valid_state) - user_metadata["integration_oauth_states"] = oauth_states - self.supabase.auth.admin.update_user_by_id( - user_id, {"user_metadata": user_metadata} - ) + user_metadata.integration_oauth_states = oauth_states + self.db_manager.update_user_metadata(user_id, user_metadata) return True return False @@ -177,19 +181,13 @@ def _set_user_integration_creds( self, user_id: str, credentials: list[Credentials] ) -> None: raw_metadata = self._get_user_metadata(user_id) - raw_metadata.update( - {"integration_credentials": [c.model_dump() for c in credentials]} - ) - self.supabase.auth.admin.update_user_by_id( - user_id, {"user_metadata": raw_metadata} - ) + raw_metadata.integration_credentials = [c.model_dump() for c in credentials] + self.db_manager.update_user_metadata(user_id, raw_metadata) def _get_user_metadata(self, user_id: str) -> UserMetadataRaw: - response = self.supabase.auth.admin.get_user_by_id(user_id) - if not response.user: - raise ValueError(f"User with ID {user_id} not found") - return cast(UserMetadataRaw, response.user.user_metadata) + metadata: UserMetadataRaw = self.db_manager.get_user_metadata(user_id=user_id) + return metadata def locked_user_metadata(self, user_id: str): - key = (self.supabase.supabase_url, f"user:{user_id}", "metadata") + key = (self.db_manager, f"user:{user_id}", "metadata") return self.locks.locked(key) diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py index da39f6a842c2..0f973bb52484 100644 --- a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py +++ b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py @@ -56,6 +56,7 @@ class OAuthState(BaseModel): token: str provider: str expires_at: int + scopes: list[str] """Unix timestamp (seconds) indicating when this OAuth state expires""" @@ -64,6 +65,6 @@ class UserMetadata(BaseModel): integration_oauth_states: list[OAuthState] = Field(default_factory=list) -class UserMetadataRaw(TypedDict, total=False): - integration_credentials: list[dict] - integration_oauth_states: list[dict] +class UserMetadataRaw(BaseModel): + integration_credentials: list[dict] = Field(default_factory=list) + integration_oauth_states: list[dict] = Field(default_factory=list) diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py b/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py new file mode 100644 index 000000000000..b4506dda47b8 --- /dev/null +++ 
b/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py
@@ -0,0 +1,27 @@
+import threading
+from functools import wraps
+from typing import Callable, ParamSpec, TypeVar
+
+T = TypeVar("T")
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
+    thread_local = threading.local()
+
+    @wraps(func)
+    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
+        cache = getattr(thread_local, "cache", None)
+        if cache is None:
+            cache = thread_local.cache = {}
+        key = (args, tuple(sorted(kwargs.items())))
+        if key not in cache:
+            cache[key] = func(*args, **kwargs)
+        return cache[key]
+
+    return wrapper
+
+
+def thread_cached_property(func: Callable[[T], R]) -> property:
+    return property(thread_cached(func))
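For reference, a minimal usage sketch of the `thread_cached` decorator added above. The toy `expensive_lookup` function is invented for illustration, and the import path assumes `autogpt_libs` is installed. Because the cache lives in a `threading.local()`, repeated calls with the same arguments are computed once per thread, and each new thread starts with an empty cache:

```python
import threading

from autogpt_libs.utils.cache import thread_cached  # path added in this diff


@thread_cached
def expensive_lookup(x: int) -> int:
    # Pretend this is costly; the print shows when it actually runs.
    print(f"computing {x} in {threading.current_thread().name}")
    return x * x


expensive_lookup(4)  # computes in MainThread
expensive_lookup(4)  # cache hit on this thread: no print

# A new thread has its own threading.local() slot, so it recomputes.
t = threading.Thread(target=expensive_lookup, args=(4,))
t.start()
t.join()
```

`thread_cached_property` wraps the same decorator in a `property`, which is how the `SupabaseIntegrationCredentialsStore.db_manager` change above lazily builds one `DatabaseManager` client per thread.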
diff --git a/autogpt_platform/backend/.env.example b/autogpt_platform/backend/.env.example
index eb5680b6252a..24757a7f1d9c 100644
--- a/autogpt_platform/backend/.env.example
+++ b/autogpt_platform/backend/.env.example
@@ -20,13 +20,13 @@ PYRO_HOST=localhost
 SENTRY_DSN=
 
 ## User auth with Supabase is required for any of the 3rd party integrations with auth to work.
-ENABLE_AUTH=false
+ENABLE_AUTH=true
 SUPABASE_URL=http://localhost:8000
 SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
 SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
 
 # For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow for integrations to work.
-# FRONTEND_BASE_URL=http://localhost:3000
+FRONTEND_BASE_URL=http://localhost:3000
 
 ## == INTEGRATION CREDENTIALS == ##
 # Each set of server side credentials is required for the corresponding 3rd party
diff --git a/autogpt_platform/backend/Dockerfile b/autogpt_platform/backend/Dockerfile
index f697db11982c..5795398d1fae 100644
--- a/autogpt_platform/backend/Dockerfile
+++ b/autogpt_platform/backend/Dockerfile
@@ -8,7 +8,7 @@ WORKDIR /app
 
 # Install build dependencies
 RUN apt-get update \
-    && apt-get install -y build-essential curl ffmpeg wget libcurl4-gnutls-dev libexpat1-dev gettext libz-dev libssl-dev postgresql-client git \
+    && apt-get install -y build-essential curl ffmpeg wget libcurl4-gnutls-dev libexpat1-dev libpq5 gettext libz-dev libssl-dev postgresql-client git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
diff --git a/autogpt_platform/backend/README.advanced.md b/autogpt_platform/backend/README.advanced.md
index 829a3d79262a..09e0f90fcc25 100644
--- a/autogpt_platform/backend/README.advanced.md
+++ b/autogpt_platform/backend/README.advanced.md
@@ -37,7 +37,7 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes
 5. Generate the Prisma client
 
    ```sh
-   poetry run prisma generate --schema postgres/schema.prisma
+   poetry run prisma generate
    ```
 
@@ -61,7 +61,7 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes
 
    ```sh
    cd ../backend
-   prisma migrate dev --schema postgres/schema.prisma
+   prisma migrate deploy
    ```
 
 ## Running The Server
diff --git a/autogpt_platform/backend/README.md b/autogpt_platform/backend/README.md
index fc0c6b3944d0..ab91027df23a 100644
--- a/autogpt_platform/backend/README.md
+++ b/autogpt_platform/backend/README.md
@@ -58,17 +58,18 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes
 6. Migrate the database. Be careful because this deletes current data in the database.
 
    ```sh
-   docker compose up db redis -d
-   poetry run prisma migrate dev
+   docker compose up db -d
+   poetry run prisma migrate deploy
    ```
 
 ## Running The Server
 
 ### Starting the server without Docker
 
-Run the following command to build the dockerfiles:
+Run the following command to run the database in Docker while running the application locally:
 
 ```sh
+docker compose --profile local up deps --build --detach
 poetry run app
 ```
diff --git a/autogpt_platform/backend/backend/blocks/__init__.py b/autogpt_platform/backend/backend/blocks/__init__.py
index 1fd85aef4630..4fb89e3957ff 100644
--- a/autogpt_platform/backend/backend/blocks/__init__.py
+++ b/autogpt_platform/backend/backend/blocks/__init__.py
@@ -2,6 +2,7 @@
 import os
 import re
 from pathlib import Path
+from typing import Type, TypeVar
 
 from backend.data.block import Block
@@ -24,28 +25,31 @@
     AVAILABLE_MODULES.append(module)
 
 # Load all Block instances from the available modules
-AVAILABLE_BLOCKS = {}
+AVAILABLE_BLOCKS: dict[str, Type[Block]] = {}
 
 
-def all_subclasses(clz):
-    subclasses = clz.__subclasses__()
+T = TypeVar("T")
+
+
+def all_subclasses(cls: Type[T]) -> list[Type[T]]:
+    subclasses = cls.__subclasses__()
     for subclass in subclasses:
         subclasses += all_subclasses(subclass)
     return subclasses
 
-for cls in all_subclasses(Block):
-    name = cls.__name__
+for block_cls in all_subclasses(Block):
+    name = block_cls.__name__
 
-    if cls.__name__.endswith("Base"):
+    if block_cls.__name__.endswith("Base"):
         continue
 
-    if not cls.__name__.endswith("Block"):
+    if not block_cls.__name__.endswith("Block"):
         raise ValueError(
-            f"Block class {cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end"
+            f"Block class {block_cls.__name__} does not end with 'Block'. If you are creating an abstract class, please name the class with 'Base' at the end."
         )
 
-    block = cls()
+    block = block_cls.create()
 
     if not isinstance(block.id, str) or len(block.id) != 36:
         raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID")
@@ -69,6 +73,17 @@ def all_subclasses(clz):
             f"{block.name} `error` field in output_schema must be a string"
         )
 
+    # Make sure all fields in input_schema and output_schema are annotated and have a value
+    for field_name, field in [*input_schema.items(), *output_schema.items()]:
+        if field.annotation is None:
+            raise ValueError(
+                f"{block.name} has a field {field_name} that is not annotated"
+            )
+        if field.json_schema_extra is None:
+            raise ValueError(
+                f"{block.name} has a field {field_name} not defined as SchemaField"
+            )
+
     for field in block.input_schema.model_fields.values():
         if field.annotation is bool and field.default not in (True, False):
             raise ValueError(f"{block.name} has a boolean field with no default value")
@@ -76,6 +91,6 @@ def all_subclasses(clz):
     if block.disabled:
         continue
 
-    AVAILABLE_BLOCKS[block.id] = block
+    AVAILABLE_BLOCKS[block.id] = block_cls
 
 __all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"]
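To make the loader's validation rules above concrete, here is a hedged sketch of a minimal block that would pass those checks. The `EchoBlock` name, its fields, and the UUID are invented for illustration; `Block`, `BlockSchema`, `BlockOutput`, `BlockCategory`, and `SchemaField` are the real classes used throughout this diff. Note how the class name ends in `Block`, every schema field is declared with `SchemaField`, the `error` output is a string, and the boolean input has a default:

```python
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class EchoBlock(Block):
    class Input(BlockSchema):
        text: str = SchemaField(description="Text to echo back")
        uppercase: bool = SchemaField(
            description="Whether to uppercase the text",
            default=False,  # boolean fields must have a default
        )

    class Output(BlockSchema):
        output: str = SchemaField(description="The echoed text")
        error: str = SchemaField(description="Error message if echoing fails")

    def __init__(self):
        super().__init__(
            id="00000000-0000-4000-8000-000000000000",  # placeholder; must be a 36-char UUID
            description="Echoes its input, optionally uppercased",
            categories={BlockCategory.TEXT},
            input_schema=EchoBlock.Input,
            output_schema=EchoBlock.Output,
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        text = input_data.text.upper() if input_data.uppercase else input_data.text
        yield "output", text
```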
diff --git a/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py
index 127bb3ae8b4a..3fe92950c199 100644
--- a/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py
+++ b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py
@@ -3,7 +3,6 @@
 from enum import Enum
 
 import requests
-from pydantic import Field
 
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
 from backend.data.model import BlockSecret, SchemaField, SecretField
@@ -129,9 +128,13 @@ class Input(BlockSchema):
             description="""1. Use short and punctuated sentences\n\n2. Use linebreaks to create a new clip\n\n3. Text outside of brackets is spoken by the AI, and [text between brackets] will be used to guide the visual generation. For example, [close-up of a cat] will show a close-up of a cat.""",
             placeholder="[close-up of a cat] Meow!",
         )
-        ratio: str = Field(description="Aspect ratio of the video", default="9 / 16")
-        resolution: str = Field(description="Resolution of the video", default="720p")
-        frame_rate: int = Field(description="Frame rate of the video", default=60)
+        ratio: str = SchemaField(
+            description="Aspect ratio of the video", default="9 / 16"
+        )
+        resolution: str = SchemaField(
+            description="Resolution of the video", default="720p"
+        )
+        frame_rate: int = SchemaField(description="Frame rate of the video", default=60)
         generation_preset: GenerationPreset = SchemaField(
             description="Generation preset for visual style - only effects AI generated visuals",
             default=GenerationPreset.LEONARDO,
@@ -154,8 +157,8 @@ class Input(BlockSchema):
         )
 
     class Output(BlockSchema):
-        video_url: str = Field(description="The URL of the created video")
-        error: str = Field(description="Error message if the request failed")
+        video_url: str = SchemaField(description="The URL of the created video")
+        error: str = SchemaField(description="Error message if the request failed")
 
     def __init__(self):
         super().__init__(
diff --git a/autogpt_platform/backend/backend/blocks/basic.py b/autogpt_platform/backend/backend/blocks/basic.py
index 60992e0f4591..391d6b615aad 100644
--- a/autogpt_platform/backend/backend/blocks/basic.py
+++ b/autogpt_platform/backend/backend/blocks/basic.py
@@ -2,7 +2,6 @@
 from typing import Any, List
 
 from jinja2 import BaseLoader, Environment
-from pydantic import Field
 
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType
 from backend.data.model import SchemaField
@@ -19,18 +18,18 @@ class StoreValueBlock(Block):
     """
 
     class Input(BlockSchema):
-        input: Any = Field(
+        input: Any = SchemaField(
             description="Trigger the block to produce the output. "
             "The value is only used when `data` is None."
         )
-        data: Any = Field(
+        data: Any = SchemaField(
            description="The constant data to be retained in the block. 
" "This value is passed as `output`.", default=None, ) class Output(BlockSchema): - output: Any + output: Any = SchemaField(description="The stored data retained in the block.") def __init__(self): super().__init__( @@ -56,10 +55,10 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class PrintToConsoleBlock(Block): class Input(BlockSchema): - text: str + text: str = SchemaField(description="The text to print to the console.") class Output(BlockSchema): - status: str + status: str = SchemaField(description="The status of the print operation.") def __init__(self): super().__init__( @@ -79,12 +78,14 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class FindInDictionaryBlock(Block): class Input(BlockSchema): - input: Any = Field(description="Dictionary to lookup from") - key: str | int = Field(description="Key to lookup in the dictionary") + input: Any = SchemaField(description="Dictionary to lookup from") + key: str | int = SchemaField(description="Key to lookup in the dictionary") class Output(BlockSchema): - output: Any = Field(description="Value found for the given key") - missing: Any = Field(description="Value of the input that missing the key") + output: Any = SchemaField(description="Value found for the given key") + missing: Any = SchemaField( + description="Value of the input that missing the key" + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/block.py b/autogpt_platform/backend/backend/blocks/block.py index a4bf8f6ac524..01e8af7238ea 100644 --- a/autogpt_platform/backend/backend/blocks/block.py +++ b/autogpt_platform/backend/backend/blocks/block.py @@ -3,6 +3,7 @@ from typing import Type from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField class BlockInstallationBlock(Block): @@ -15,11 +16,17 @@ class BlockInstallationBlock(Block): """ class Input(BlockSchema): - code: str + code: str = SchemaField( + description="Python code of the block to be installed", + ) class Output(BlockSchema): - success: str - error: str + success: str = SchemaField( + description="Success message if the block is installed successfully", + ) + error: str = SchemaField( + description="Error message if the block installation fails", + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/csv.py b/autogpt_platform/backend/backend/blocks/csv.py index b53f6c5ac889..e78c8994737a 100644 --- a/autogpt_platform/backend/backend/blocks/csv.py +++ b/autogpt_platform/backend/backend/blocks/csv.py @@ -1,21 +1,49 @@ from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema -from backend.data.model import ContributorDetails +from backend.data.model import ContributorDetails, SchemaField class ReadCsvBlock(Block): class Input(BlockSchema): - contents: str - delimiter: str = "," - quotechar: str = '"' - escapechar: str = "\\" - has_header: bool = True - skip_rows: int = 0 - strip: bool = True - skip_columns: list[str] = [] + contents: str = SchemaField( + description="The contents of the CSV file to read", + placeholder="a, b, c\n1,2,3\n4,5,6", + ) + delimiter: str = SchemaField( + description="The delimiter used in the CSV file", + default=",", + ) + quotechar: str = SchemaField( + description="The character used to quote fields", + default='"', + ) + escapechar: str = SchemaField( + description="The character used to escape the delimiter", + default="\\", + ) + has_header: bool = SchemaField( + description="Whether the CSV 
file has a header row", + default=True, + ) + skip_rows: int = SchemaField( + description="The number of rows to skip from the start of the file", + default=0, + ) + strip: bool = SchemaField( + description="Whether to strip whitespace from the values", + default=True, + ) + skip_columns: list[str] = SchemaField( + description="The columns to skip from the start of the row", + default=[], + ) class Output(BlockSchema): - row: dict[str, str] - all_data: list[dict[str, str]] + row: dict[str, str] = SchemaField( + description="The data produced from each row in the CSV file" + ) + all_data: list[dict[str, str]] = SchemaField( + description="All the data in the CSV file as a list of rows" + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/discord.py b/autogpt_platform/backend/backend/blocks/discord.py index cd350a619916..e5414cd32727 100644 --- a/autogpt_platform/backend/backend/blocks/discord.py +++ b/autogpt_platform/backend/backend/blocks/discord.py @@ -2,10 +2,9 @@ import aiohttp import discord -from pydantic import Field from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema -from backend.data.model import BlockSecret, SecretField +from backend.data.model import BlockSecret, SchemaField, SecretField class ReadDiscordMessagesBlock(Block): @@ -13,16 +12,18 @@ class Input(BlockSchema): discord_bot_token: BlockSecret = SecretField( key="discord_bot_token", description="Discord bot token" ) - continuous_read: bool = Field( + continuous_read: bool = SchemaField( description="Whether to continuously read messages", default=True ) class Output(BlockSchema): - message_content: str = Field(description="The content of the message received") - channel_name: str = Field( + message_content: str = SchemaField( + description="The content of the message received" + ) + channel_name: str = SchemaField( description="The name of the channel the message was received from" ) - username: str = Field( + username: str = SchemaField( description="The username of the user who sent the message" ) @@ -134,13 +135,15 @@ class Input(BlockSchema): discord_bot_token: BlockSecret = SecretField( key="discord_bot_token", description="Discord bot token" ) - message_content: str = Field(description="The content of the message received") - channel_name: str = Field( + message_content: str = SchemaField( + description="The content of the message received" + ) + channel_name: str = SchemaField( description="The name of the channel the message was received from" ) class Output(BlockSchema): - status: str = Field( + status: str = SchemaField( description="The status of the operation (e.g., 'Message sent', 'Error')" ) diff --git a/autogpt_platform/backend/backend/blocks/email_block.py b/autogpt_platform/backend/backend/blocks/email_block.py index a7f0f82dcee7..79accb6d7d35 100644 --- a/autogpt_platform/backend/backend/blocks/email_block.py +++ b/autogpt_platform/backend/backend/blocks/email_block.py @@ -2,17 +2,17 @@ from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import BlockSecret, SchemaField, SecretField class EmailCredentials(BaseModel): - smtp_server: str = Field( + smtp_server: str = SchemaField( default="smtp.gmail.com", description="SMTP server address" ) - smtp_port: int = Field(default=25, description="SMTP port number") + 
smtp_port: int = SchemaField(default=25, description="SMTP port number") smtp_username: BlockSecret = SecretField(key="smtp_username") smtp_password: BlockSecret = SecretField(key="smtp_password") @@ -30,7 +30,7 @@ class Input(BlockSchema): body: str = SchemaField( description="Body of the email", placeholder="Enter the email body" ) - creds: EmailCredentials = Field( + creds: EmailCredentials = SchemaField( description="SMTP credentials", default=EmailCredentials(), ) diff --git a/autogpt_platform/backend/backend/blocks/http.py b/autogpt_platform/backend/backend/blocks/http.py index 04d893f84701..74a1d3d0bb0b 100644 --- a/autogpt_platform/backend/backend/blocks/http.py +++ b/autogpt_platform/backend/backend/blocks/http.py @@ -4,6 +4,7 @@ import requests from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField class HttpMethod(Enum): @@ -18,15 +19,27 @@ class HttpMethod(Enum): class SendWebRequestBlock(Block): class Input(BlockSchema): - url: str - method: HttpMethod = HttpMethod.POST - headers: dict[str, str] = {} - body: object = {} + url: str = SchemaField( + description="The URL to send the request to", + placeholder="https://api.example.com", + ) + method: HttpMethod = SchemaField( + description="The HTTP method to use for the request", + default=HttpMethod.POST, + ) + headers: dict[str, str] = SchemaField( + description="The headers to include in the request", + default={}, + ) + body: object = SchemaField( + description="The body of the request", + default={}, + ) class Output(BlockSchema): - response: object - client_error: object - server_error: object + response: object = SchemaField(description="The response from the server") + client_error: object = SchemaField(description="The error on 4xx status codes") + server_error: object = SchemaField(description="The error on 5xx status codes") def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/ideogram.py b/autogpt_platform/backend/backend/blocks/ideogram.py index 66dd22061447..6818a25371e2 100644 --- a/autogpt_platform/backend/backend/blocks/ideogram.py +++ b/autogpt_platform/backend/backend/blocks/ideogram.py @@ -75,28 +75,24 @@ class Input(BlockSchema): description="The name of the Image Generation Model, e.g., V_2", default=IdeogramModelName.V2, title="Image Generation Model", - enum=IdeogramModelName, advanced=False, ) aspect_ratio: AspectRatio = SchemaField( description="Aspect ratio for the generated image", default=AspectRatio.ASPECT_1_1, title="Aspect Ratio", - enum=AspectRatio, advanced=False, ) upscale: UpscaleOption = SchemaField( description="Upscale the generated image", default=UpscaleOption.NO_UPSCALE, title="Upscale Image", - enum=UpscaleOption, advanced=False, ) magic_prompt_option: MagicPromptOption = SchemaField( description="Whether to use MagicPrompt for enhancing the request", default=MagicPromptOption.AUTO, title="Magic Prompt Option", - enum=MagicPromptOption, advanced=True, ) seed: Optional[int] = SchemaField( @@ -109,7 +105,6 @@ class Input(BlockSchema): description="Style type to apply, applicable for V_2 and above", default=StyleType.AUTO, title="Style Type", - enum=StyleType, advanced=True, ) negative_prompt: Optional[str] = SchemaField( @@ -122,7 +117,6 @@ class Input(BlockSchema): description="Color palette preset name, choose 'None' to skip", default=ColorPalettePreset.NONE, title="Color Palette Preset", - enum=ColorPalettePreset, advanced=True, ) diff --git 
a/autogpt_platform/backend/backend/blocks/jina/_auth.py b/autogpt_platform/backend/backend/blocks/jina/_auth.py new file mode 100644 index 000000000000..c39443da47b8 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/jina/_auth.py @@ -0,0 +1,39 @@ +from typing import Literal + +from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials +from pydantic import SecretStr + +from backend.data.model import CredentialsField, CredentialsMetaInput + +JinaCredentials = APIKeyCredentials +JinaCredentialsInput = CredentialsMetaInput[ + Literal["jina"], + Literal["api_key"], +] + + +def JinaCredentialsField() -> JinaCredentialsInput: + """ + Creates a Jina credentials input on a block. + + """ + return CredentialsField( + provider="jina", + supported_credential_types={"api_key"}, + description="The Jina integration can be used with an API Key.", + ) + + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="jina", + api_key=SecretStr("mock-jina-api-key"), + title="Mock Jina API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} diff --git a/autogpt_platform/backend/backend/blocks/jina/chunking.py b/autogpt_platform/backend/backend/blocks/jina/chunking.py new file mode 100644 index 000000000000..f3b0c4a34b8e --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/jina/chunking.py @@ -0,0 +1,69 @@ +import requests + +from backend.blocks.jina._auth import ( + JinaCredentials, + JinaCredentialsField, + JinaCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class JinaChunkingBlock(Block): + class Input(BlockSchema): + texts: list = SchemaField(description="List of texts to chunk") + + credentials: JinaCredentialsInput = JinaCredentialsField() + max_chunk_length: int = SchemaField( + description="Maximum length of each chunk", default=1000 + ) + return_tokens: bool = SchemaField( + description="Whether to return token information", default=False + ) + + class Output(BlockSchema): + chunks: list = SchemaField(description="List of chunked texts") + tokens: list = SchemaField( + description="List of token information for each chunk", optional=True + ) + + def __init__(self): + super().__init__( + id="806fb15e-830f-4796-8692-557d300ff43c", + description="Chunks texts using Jina AI's segmentation service", + categories={BlockCategory.AI, BlockCategory.TEXT}, + input_schema=JinaChunkingBlock.Input, + output_schema=JinaChunkingBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: JinaCredentials, **kwargs + ) -> BlockOutput: + url = "https://segment.jina.ai/" + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {credentials.api_key.get_secret_value()}", + } + + all_chunks = [] + all_tokens = [] + + for text in input_data.texts: + data = { + "content": text, + "return_tokens": str(input_data.return_tokens).lower(), + "return_chunks": "true", + "max_chunk_length": str(input_data.max_chunk_length), + } + + response = requests.post(url, headers=headers, json=data) + response.raise_for_status() + result = response.json() + + all_chunks.extend(result.get("chunks", [])) + if input_data.return_tokens: + all_tokens.extend(result.get("tokens", [])) + + yield "chunks", all_chunks + if input_data.return_tokens: + yield "tokens", all_tokens diff --git 
a/autogpt_platform/backend/backend/blocks/jina/embeddings.py b/autogpt_platform/backend/backend/blocks/jina/embeddings.py new file mode 100644 index 000000000000..a33acfec9eec --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/jina/embeddings.py @@ -0,0 +1,44 @@ +import requests + +from backend.blocks.jina._auth import ( + JinaCredentials, + JinaCredentialsField, + JinaCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class JinaEmbeddingBlock(Block): + class Input(BlockSchema): + texts: list = SchemaField(description="List of texts to embed") + credentials: JinaCredentialsInput = JinaCredentialsField() + model: str = SchemaField( + description="Jina embedding model to use", + default="jina-embeddings-v2-base-en", + ) + + class Output(BlockSchema): + embeddings: list = SchemaField(description="List of embeddings") + + def __init__(self): + super().__init__( + id="7c56b3ab-62e7-43a2-a2dc-4ec4245660b6", + description="Generates embeddings using Jina AI", + categories={BlockCategory.AI}, + input_schema=JinaEmbeddingBlock.Input, + output_schema=JinaEmbeddingBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: JinaCredentials, **kwargs + ) -> BlockOutput: + url = "https://api.jina.ai/v1/embeddings" + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {credentials.api_key.get_secret_value()}", + } + data = {"input": input_data.texts, "model": input_data.model} + response = requests.post(url, headers=headers, json=data) + embeddings = [e["embedding"] for e in response.json()["data"]] + yield "embeddings", embeddings diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index f38b5f5da72d..1366429a542d 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -62,7 +62,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta): GPT4_TURBO = "gpt-4-turbo" GPT3_5_TURBO = "gpt-3.5-turbo" # Anthropic models - CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20240620" + CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest" CLAUDE_3_HAIKU = "claude-3-haiku-20240307" # Groq models LLAMA3_8B = "llama3-8b-8192" @@ -96,25 +96,25 @@ def cost_factor(self) -> int: MODEL_METADATA = { - LlmModel.O1_PREVIEW: ModelMetadata("openai", 32000, cost_factor=60), - LlmModel.O1_MINI: ModelMetadata("openai", 62000, cost_factor=30), - LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000, cost_factor=10), - LlmModel.GPT4O: ModelMetadata("openai", 128000, cost_factor=12), - LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000, cost_factor=11), - LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, cost_factor=8), - LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000, cost_factor=14), - LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000, cost_factor=13), - LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192, cost_factor=6), - LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192, cost_factor=9), - LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768, cost_factor=7), - LlmModel.GEMMA_7B: ModelMetadata("groq", 8192, cost_factor=6), - LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, cost_factor=7), - LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192, cost_factor=10), + LlmModel.O1_PREVIEW: ModelMetadata("openai", 32000, cost_factor=16), + LlmModel.O1_MINI: ModelMetadata("openai", 62000, cost_factor=4), + LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000, cost_factor=1), + 
LlmModel.GPT4O: ModelMetadata("openai", 128000, cost_factor=3), + LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000, cost_factor=10), + LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, cost_factor=1), + LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000, cost_factor=4), + LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000, cost_factor=1), + LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192, cost_factor=1), + LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192, cost_factor=1), + LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768, cost_factor=1), + LlmModel.GEMMA_7B: ModelMetadata("groq", 8192, cost_factor=1), + LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, cost_factor=1), + LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192, cost_factor=1), # Limited to 16k during preview - LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072, cost_factor=15), - LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072, cost_factor=13), - LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, cost_factor=7), - LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, cost_factor=11), + LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072, cost_factor=1), + LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072, cost_factor=1), + LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, cost_factor=1), + LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, cost_factor=1), } for model in LlmModel: @@ -122,9 +122,23 @@ def cost_factor(self) -> int: raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}") +class MessageRole(str, Enum): + SYSTEM = "system" + USER = "user" + ASSISTANT = "assistant" + + +class Message(BlockSchema): + role: MessageRole + content: str + + class AIStructuredResponseGeneratorBlock(Block): class Input(BlockSchema): - prompt: str + prompt: str = SchemaField( + description="The prompt to send to the language model.", + placeholder="Enter your prompt here...", + ) expected_format: dict[str, str] = SchemaField( description="Expected format of the response. If provided, the response will be validated against this format. " "The keys should be the expected fields in the response, and the values should be the description of the field.", @@ -136,15 +150,34 @@ class Input(BlockSchema): advanced=False, ) api_key: BlockSecret = SecretField(value="") - sys_prompt: str = "" - retry: int = 3 + sys_prompt: str = SchemaField( + title="System Prompt", + default="", + description="The system prompt to provide additional context to the model.", + ) + conversation_history: list[Message] = SchemaField( + default=[], + description="The conversation history to provide context for the prompt.", + ) + retry: int = SchemaField( + title="Retry Count", + default=3, + description="Number of times to retry the LLM call if the response does not match the expected format.", + ) prompt_values: dict[str, str] = SchemaField( advanced=False, default={}, description="Values used to fill in the prompt." ) + max_tokens: int | None = SchemaField( + advanced=True, + default=None, + description="The maximum number of tokens to generate in the chat completion.", + ) class Output(BlockSchema): - response: dict[str, Any] - error: str + response: dict[str, Any] = SchemaField( + description="The response object generated by the language model." 
+ ) + error: str = SchemaField(description="Error message if the API call failed.") def __init__(self): super().__init__( @@ -164,26 +197,47 @@ def __init__(self): }, test_output=("response", {"key1": "key1Value", "key2": "key2Value"}), test_mock={ - "llm_call": lambda *args, **kwargs: json.dumps( - { - "key1": "key1Value", - "key2": "key2Value", - } + "llm_call": lambda *args, **kwargs: ( + json.dumps( + { + "key1": "key1Value", + "key2": "key2Value", + } + ), + 0, + 0, ) }, ) @staticmethod def llm_call( - api_key: str, model: LlmModel, prompt: list[dict], json_format: bool - ) -> str: - provider = model.metadata.provider + api_key: str, + llm_model: LlmModel, + prompt: list[dict], + json_format: bool, + max_tokens: int | None = None, + ) -> tuple[str, int, int]: + """ + Args: + api_key: API key for the LLM provider. + llm_model: The LLM model to use. + prompt: The prompt to send to the LLM. + json_format: Whether the response should be in JSON format. + max_tokens: The maximum number of tokens to generate in the chat completion. + + Returns: + The response from the LLM. + The number of tokens used in the prompt. + The number of tokens used in the completion. + """ + provider = llm_model.metadata.provider if provider == "openai": openai.api_key = api_key response_format = None - if model in [LlmModel.O1_MINI, LlmModel.O1_PREVIEW]: + if llm_model in [LlmModel.O1_MINI, LlmModel.O1_PREVIEW]: sys_messages = [p["content"] for p in prompt if p["role"] == "system"] usr_messages = [p["content"] for p in prompt if p["role"] != "system"] prompt = [ @@ -194,11 +248,17 @@ def llm_call( response_format = {"type": "json_object"} response = openai.chat.completions.create( - model=model.value, + model=llm_model.value, messages=prompt, # type: ignore response_format=response_format, # type: ignore + max_completion_tokens=max_tokens, + ) + + return ( + response.choices[0].message.content or "", + response.usage.prompt_tokens if response.usage else 0, + response.usage.completion_tokens if response.usage else 0, ) - return response.choices[0].message.content or "" elif provider == "anthropic": system_messages = [p["content"] for p in prompt if p["role"] == "system"] sysprompt = " ".join(system_messages) @@ -216,13 +276,18 @@ def llm_call( client = anthropic.Anthropic(api_key=api_key) try: - response = client.messages.create( - model=model.value, - max_tokens=4096, + resp = client.messages.create( + model=llm_model.value, system=sysprompt, messages=messages, + max_tokens=max_tokens or 8192, + ) + + return ( + resp.content[0].text if resp.content else "", + resp.usage.input_tokens, + resp.usage.output_tokens, ) - return response.content[0].text if response.content else "" except anthropic.APIError as e: error_message = f"Anthropic API error: {str(e)}" logger.error(error_message) @@ -231,23 +296,35 @@ def llm_call( client = Groq(api_key=api_key) response_format = {"type": "json_object"} if json_format else None response = client.chat.completions.create( - model=model.value, + model=llm_model.value, messages=prompt, # type: ignore response_format=response_format, # type: ignore + max_tokens=max_tokens, + ) + return ( + response.choices[0].message.content or "", + response.usage.prompt_tokens if response.usage else 0, + response.usage.completion_tokens if response.usage else 0, ) - return response.choices[0].message.content or "" elif provider == "ollama": + sys_messages = [p["content"] for p in prompt if p["role"] == "system"] + usr_messages = [p["content"] for p in prompt if p["role"] != "system"] response = 
ollama.generate( - model=model.value, - prompt=prompt[0]["content"], + model=llm_model.value, + prompt=f"{sys_messages}\n\n{usr_messages}", + stream=False, + ) + return ( + response.get("response") or "", + response.get("prompt_eval_count") or 0, + response.get("eval_count") or 0, ) - return response["response"] else: raise ValueError(f"Unsupported LLM provider: {provider}") def run(self, input_data: Input, **kwargs) -> BlockOutput: logger.debug(f"Calling LLM with input data: {input_data}") - prompt = [] + prompt = [p.model_dump() for p in input_data.conversation_history] def trim_prompt(s: str) -> str: lines = s.strip().split("\n") @@ -276,7 +353,8 @@ def trim_prompt(s: str) -> str: ) prompt.append({"role": "system", "content": sys_prompt}) - prompt.append({"role": "user", "content": input_data.prompt}) + if input_data.prompt: + prompt.append({"role": "user", "content": input_data.prompt}) def parse_response(resp: str) -> tuple[dict[str, Any], str | None]: try: @@ -292,19 +370,26 @@ def parse_response(resp: str) -> tuple[dict[str, Any], str | None]: logger.info(f"LLM request: {prompt}") retry_prompt = "" - model = input_data.model + llm_model = input_data.model api_key = ( input_data.api_key.get_secret_value() - or LlmApiKeys[model.metadata.provider].get_secret_value() + or LlmApiKeys[llm_model.metadata.provider].get_secret_value() ) for retry_count in range(input_data.retry): try: - response_text = self.llm_call( + response_text, input_token, output_token = self.llm_call( api_key=api_key, - model=model, + llm_model=llm_model, prompt=prompt, json_format=bool(input_data.expected_format), + max_tokens=input_data.max_tokens, + ) + self.merge_stats( + { + "input_token_count": input_token, + "output_token_count": output_token, + } ) logger.info(f"LLM attempt-{retry_count} response: {response_text}") @@ -341,15 +426,25 @@ def parse_response(resp: str) -> tuple[dict[str, Any], str | None]: ) prompt.append({"role": "user", "content": retry_prompt}) except Exception as e: - logger.error(f"Error calling LLM: {e}") + logger.exception(f"Error calling LLM: {e}") retry_prompt = f"Error calling LLM: {e}" + finally: + self.merge_stats( + { + "llm_call_count": retry_count + 1, + "llm_retry_count": retry_count, + } + ) raise RuntimeError(retry_prompt) class AITextGeneratorBlock(Block): class Input(BlockSchema): - prompt: str + prompt: str = SchemaField( + description="The prompt to send to the language model.", + placeholder="Enter your prompt here...", + ) model: LlmModel = SchemaField( title="LLM Model", default=LlmModel.GPT4_TURBO, @@ -357,15 +452,30 @@ class Input(BlockSchema): advanced=False, ) api_key: BlockSecret = SecretField(value="") - sys_prompt: str = "" - retry: int = 3 + sys_prompt: str = SchemaField( + title="System Prompt", + default="", + description="The system prompt to provide additional context to the model.", + ) + retry: int = SchemaField( + title="Retry Count", + default=3, + description="Number of times to retry the LLM call if the response does not match the expected format.", + ) prompt_values: dict[str, str] = SchemaField( advanced=False, default={}, description="Values used to fill in the prompt." ) + max_tokens: int | None = SchemaField( + advanced=True, + default=None, + description="The maximum number of tokens to generate in the chat completion.", + ) class Output(BlockSchema): - response: str - error: str + response: str = SchemaField( + description="The response generated by the language model." 
+ ) + error: str = SchemaField(description="Error message if the API call failed.") def __init__(self): super().__init__( @@ -379,15 +489,11 @@ def __init__(self): test_mock={"llm_call": lambda *args, **kwargs: "Response text"}, ) - @staticmethod - def llm_call(input_data: AIStructuredResponseGeneratorBlock.Input) -> str: - object_block = AIStructuredResponseGeneratorBlock() - for output_name, output_data in object_block.run(input_data): - if output_name == "response": - return output_data["response"] - else: - raise RuntimeError(output_data) - raise ValueError("Failed to get a response from the LLM.") + def llm_call(self, input_data: AIStructuredResponseGeneratorBlock.Input) -> str: + block = AIStructuredResponseGeneratorBlock() + response = block.run_once(input_data, "response") + self.merge_stats(block.execution_stats) + return response["response"] def run(self, input_data: Input, **kwargs) -> BlockOutput: object_input_data = AIStructuredResponseGeneratorBlock.Input( @@ -406,22 +512,43 @@ class SummaryStyle(Enum): class AITextSummarizerBlock(Block): class Input(BlockSchema): - text: str + text: str = SchemaField( + description="The text to summarize.", + placeholder="Enter the text to summarize here...", + ) model: LlmModel = SchemaField( title="LLM Model", default=LlmModel.GPT4_TURBO, description="The language model to use for summarizing the text.", ) - focus: str = "general information" - style: SummaryStyle = SummaryStyle.CONCISE + focus: str = SchemaField( + title="Focus", + default="general information", + description="The topic to focus on in the summary", + ) + style: SummaryStyle = SchemaField( + title="Summary Style", + default=SummaryStyle.CONCISE, + description="The style of the summary to generate.", + ) api_key: BlockSecret = SecretField(value="") # TODO: Make this dynamic - max_tokens: int = 4000 # Adjust based on the model's context window - chunk_overlap: int = 100 # Overlap between chunks to maintain context + max_tokens: int = SchemaField( + title="Max Tokens", + default=4096, + description="The maximum number of tokens to generate in the chat completion.", + ge=1, + ) + chunk_overlap: int = SchemaField( + title="Chunk Overlap", + default=100, + description="The number of overlapping tokens between chunks to maintain context.", + ge=0, + ) class Output(BlockSchema): - summary: str - error: str + summary: str = SchemaField(description="The final summary of the text.") + error: str = SchemaField(description="Error message if the API call failed.") def __init__(self): super().__init__( @@ -470,15 +597,11 @@ def _split_text(text: str, max_tokens: int, overlap: int) -> list[str]: return chunks - @staticmethod - def llm_call( - input_data: AIStructuredResponseGeneratorBlock.Input, - ) -> dict[str, str]: - llm_block = AIStructuredResponseGeneratorBlock() - for output_name, output_data in llm_block.run(input_data): - if output_name == "response": - return output_data - raise ValueError("Failed to get a response from the LLM.") + def llm_call(self, input_data: AIStructuredResponseGeneratorBlock.Input) -> dict: + block = AIStructuredResponseGeneratorBlock() + response = block.run_once(input_data, "response") + self.merge_stats(block.execution_stats) + return response def _summarize_chunk(self, chunk: str, input_data: Input) -> str: prompt = f"Summarize the following text in a {input_data.style} form. 
Focus your summary on the topic of `{input_data.focus}` if present, otherwise just provide a general summary:\n\n```{chunk}```" @@ -527,17 +650,6 @@ def _combine_summaries(self, summaries: list[str], input_data: Input) -> str: ] # Get the first yielded value -class MessageRole(str, Enum): - SYSTEM = "system" - USER = "user" - ASSISTANT = "assistant" - - -class Message(BlockSchema): - role: MessageRole - content: str - - class AIConversationBlock(Block): class Input(BlockSchema): messages: List[Message] = SchemaField( @@ -552,9 +664,9 @@ class Input(BlockSchema): value="", description="API key for the chosen language model provider." ) max_tokens: int | None = SchemaField( + advanced=True, default=None, description="The maximum number of tokens to generate in the chat completion.", - ge=1, ) class Output(BlockSchema): @@ -592,62 +704,22 @@ def __init__(self): }, ) - @staticmethod - def llm_call( - api_key: str, - model: LlmModel, - messages: List[dict[str, str]], - max_tokens: int | None = None, - ) -> str: - provider = model.metadata.provider - - if provider == "openai": - openai.api_key = api_key - response = openai.chat.completions.create( - model=model.value, - messages=messages, # type: ignore - max_tokens=max_tokens, - ) - return response.choices[0].message.content or "" - elif provider == "anthropic": - client = anthropic.Anthropic(api_key=api_key) - response = client.messages.create( - model=model.value, - max_tokens=max_tokens or 4096, - messages=messages, # type: ignore - ) - return response.content[0].text if response.content else "" - elif provider == "groq": - client = Groq(api_key=api_key) - response = client.chat.completions.create( - model=model.value, - messages=messages, # type: ignore - max_tokens=max_tokens, - ) - return response.choices[0].message.content or "" - elif provider == "ollama": - response = ollama.chat( - model=model.value, - messages=messages, # type: ignore - stream=False, # type: ignore - ) - return response["message"]["content"] - else: - raise ValueError(f"Unsupported LLM provider: {provider}") + def llm_call(self, input_data: AIStructuredResponseGeneratorBlock.Input) -> str: + block = AIStructuredResponseGeneratorBlock() + response = block.run_once(input_data, "response") + self.merge_stats(block.execution_stats) + return response["response"] def run(self, input_data: Input, **kwargs) -> BlockOutput: - api_key = ( - input_data.api_key.get_secret_value() - or LlmApiKeys[input_data.model.metadata.provider].get_secret_value() - ) - - messages = [message.model_dump() for message in input_data.messages] - response = self.llm_call( - api_key=api_key, - model=input_data.model, - messages=messages, - max_tokens=input_data.max_tokens, + response = self.llm_call( + AIStructuredResponseGeneratorBlock.Input( + prompt="", + api_key=input_data.api_key, + model=input_data.model, + conversation_history=input_data.messages, + max_tokens=input_data.max_tokens, + expected_format={}, + ) ) yield "response", response @@ -680,6 +752,11 @@ class Input(BlockSchema): ge=1, le=5, ) + max_tokens: int | None = SchemaField( + advanced=True, + default=None, + description="The maximum number of tokens to generate in the chat completion.", + ) class Output(BlockSchema): generated_list: List[str] = SchemaField(description="The generated list.") @@ -734,11 +811,8 @@ def llm_call( input_data: AIStructuredResponseGeneratorBlock.Input, ) -> dict[str, str]: llm_block = AIStructuredResponseGeneratorBlock() - for output_name, output_data in llm_block.run(input_data): - if output_name == "response": - 
logger.debug(f"Received response from LLM: {output_data}") - return output_data - raise ValueError("Failed to get a response from the LLM.") + response = llm_block.run_once(input_data, "response") + return response @staticmethod def string_to_list(string): diff --git a/autogpt_platform/backend/backend/blocks/medium.py b/autogpt_platform/backend/backend/blocks/medium.py index 04ebe8fab012..1d85e0978082 100644 --- a/autogpt_platform/backend/backend/blocks/medium.py +++ b/autogpt_platform/backend/backend/blocks/medium.py @@ -1,3 +1,4 @@ +from enum import Enum from typing import List import requests @@ -6,6 +7,12 @@ from backend.data.model import BlockSecret, SchemaField, SecretField +class PublishToMediumStatus(str, Enum): + PUBLIC = "public" + DRAFT = "draft" + UNLISTED = "unlisted" + + class PublishToMediumBlock(Block): class Input(BlockSchema): author_id: BlockSecret = SecretField( @@ -34,9 +41,9 @@ class Input(BlockSchema): description="The original home of this content, if it was originally published elsewhere", placeholder="https://yourblog.com/original-post", ) - publish_status: str = SchemaField( - description="The publish status: 'public', 'draft', or 'unlisted'", - placeholder="public", + publish_status: PublishToMediumStatus = SchemaField( + description="The publish status", + placeholder=PublishToMediumStatus.DRAFT, ) license: str = SchemaField( default="all-rights-reserved", @@ -79,7 +86,7 @@ def __init__(self): "tags": ["test", "automation"], "license": "all-rights-reserved", "notify_followers": False, - "publish_status": "draft", + "publish_status": PublishToMediumStatus.DRAFT.value, "api_key": "your_test_api_key", }, test_output=[ diff --git a/autogpt_platform/backend/backend/blocks/pinecone.py b/autogpt_platform/backend/backend/blocks/pinecone.py new file mode 100644 index 000000000000..91364fce92f0 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/pinecone.py @@ -0,0 +1,131 @@ +from typing import Literal + +from autogpt_libs.supabase_integration_credentials_store import APIKeyCredentials +from pinecone import Pinecone, ServerlessSpec + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField + +PineconeCredentials = APIKeyCredentials +PineconeCredentialsInput = CredentialsMetaInput[ + Literal["pinecone"], + Literal["api_key"], +] + + +def PineconeCredentialsField() -> PineconeCredentialsInput: + """ + Creates a Pinecone credentials input on a block. 
+ + """ + return CredentialsField( + provider="pinecone", + supported_credential_types={"api_key"}, + description="The Pinecone integration can be used with an API Key.", + ) + + +class PineconeInitBlock(Block): + class Input(BlockSchema): + credentials: PineconeCredentialsInput = PineconeCredentialsField() + index_name: str = SchemaField(description="Name of the Pinecone index") + dimension: int = SchemaField( + description="Dimension of the vectors", default=768 + ) + metric: str = SchemaField( + description="Distance metric for the index", default="cosine" + ) + cloud: str = SchemaField( + description="Cloud provider for serverless", default="aws" + ) + region: str = SchemaField( + description="Region for serverless", default="us-east-1" + ) + + class Output(BlockSchema): + index: str = SchemaField(description="Name of the initialized Pinecone index") + message: str = SchemaField(description="Status message") + + def __init__(self): + super().__init__( + id="48d8fdab-8f03-41f3-8407-8107ba11ec9b", + description="Initializes a Pinecone index", + categories={BlockCategory.LOGIC}, + input_schema=PineconeInitBlock.Input, + output_schema=PineconeInitBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + pc = Pinecone(api_key=credentials.api_key.get_secret_value()) + + try: + existing_indexes = pc.list_indexes() + if input_data.index_name not in [index.name for index in existing_indexes]: + pc.create_index( + name=input_data.index_name, + dimension=input_data.dimension, + metric=input_data.metric, + spec=ServerlessSpec( + cloud=input_data.cloud, region=input_data.region + ), + ) + message = f"Created new index: {input_data.index_name}" + else: + message = f"Using existing index: {input_data.index_name}" + + yield "index", input_data.index_name + yield "message", message + except Exception as e: + yield "message", f"Error initializing Pinecone index: {str(e)}" + + +class PineconeQueryBlock(Block): + class Input(BlockSchema): + credentials: PineconeCredentialsInput = PineconeCredentialsField() + query_vector: list = SchemaField(description="Query vector") + namespace: str = SchemaField( + description="Namespace to query in Pinecone", default="" + ) + top_k: int = SchemaField( + description="Number of top results to return", default=3 + ) + include_values: bool = SchemaField( + description="Whether to include vector values in the response", + default=False, + ) + include_metadata: bool = SchemaField( + description="Whether to include metadata in the response", default=True + ) + host: str = SchemaField(description="Host for pinecone") + + class Output(BlockSchema): + results: dict = SchemaField(description="Query results from Pinecone") + + def __init__(self): + super().__init__( + id="9ad93d0f-91b4-4c9c-8eb1-82e26b4a01c5", + description="Queries a Pinecone index", + categories={BlockCategory.LOGIC}, + input_schema=PineconeQueryBlock.Input, + output_schema=PineconeQueryBlock.Output, + ) + + def run( + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + **kwargs, + ) -> BlockOutput: + pc = Pinecone(api_key=credentials.api_key.get_secret_value()) + idx = pc.Index(host=input_data.host) + results = idx.query( + namespace=input_data.namespace, + vector=input_data.query_vector, + top_k=input_data.top_k, + include_values=input_data.include_values, + include_metadata=input_data.include_metadata, + ) + yield "results", results diff --git a/autogpt_platform/backend/backend/blocks/reddit.py 
b/autogpt_platform/backend/backend/blocks/reddit.py index 065436ae7321..9e4f3f3aca0b 100644 --- a/autogpt_platform/backend/backend/blocks/reddit.py +++ b/autogpt_platform/backend/backend/blocks/reddit.py @@ -2,10 +2,10 @@ from typing import Iterator import praw -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema -from backend.data.model import BlockSecret, SecretField +from backend.data.model import BlockSecret, SchemaField, SecretField from backend.util.mock import MockObject @@ -48,25 +48,25 @@ def get_praw(creds: RedditCredentials) -> praw.Reddit: class GetRedditPostsBlock(Block): class Input(BlockSchema): - subreddit: str = Field(description="Subreddit name") - creds: RedditCredentials = Field( + subreddit: str = SchemaField(description="Subreddit name") + creds: RedditCredentials = SchemaField( description="Reddit credentials", default=RedditCredentials(), ) - last_minutes: int | None = Field( + last_minutes: int | None = SchemaField( description="Post time to stop minutes ago while fetching posts", default=None, ) - last_post: str | None = Field( + last_post: str | None = SchemaField( description="Post ID to stop when reached while fetching posts", default=None, ) - post_limit: int | None = Field( + post_limit: int | None = SchemaField( description="Number of posts to fetch", default=10 ) class Output(BlockSchema): - post: RedditPost = Field(description="Reddit post") + post: RedditPost = SchemaField(description="Reddit post") def __init__(self): super().__init__( @@ -140,13 +140,13 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class PostRedditCommentBlock(Block): class Input(BlockSchema): - creds: RedditCredentials = Field( + creds: RedditCredentials = SchemaField( description="Reddit credentials", default=RedditCredentials() ) - data: RedditComment = Field(description="Reddit comment") + data: RedditComment = SchemaField(description="Reddit comment") class Output(BlockSchema): - comment_id: str + comment_id: str = SchemaField(description="Posted comment ID") def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/search.py b/autogpt_platform/backend/backend/blocks/search.py index 3c93b139d693..16d66a09fc40 100644 --- a/autogpt_platform/backend/backend/blocks/search.py +++ b/autogpt_platform/backend/backend/blocks/search.py @@ -27,11 +27,13 @@ def get_request( class GetWikipediaSummaryBlock(Block, GetRequest): class Input(BlockSchema): - topic: str + topic: str = SchemaField(description="The topic to fetch the summary for") class Output(BlockSchema): - summary: str - error: str + summary: str = SchemaField(description="The summary of the given topic") + error: str = SchemaField( + description="Error message if the summary cannot be retrieved" + ) def __init__(self): super().__init__( @@ -56,11 +58,13 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class SearchTheWebBlock(Block, GetRequest): class Input(BlockSchema): - query: str # The search query + query: str = SchemaField(description="The search query to search the web for") class Output(BlockSchema): - results: str # The search results including content from top 5 URLs - error: str # Error message if the search fails + results: str = SchemaField( + description="The search results including content from top 5 URLs" + ) + error: str = SchemaField(description="Error message if the search fails") def __init__(self): super().__init__( @@ -90,7 +94,7 @@ 
def run(self, input_data: Input, **kwargs) -> BlockOutput: class ExtractWebsiteContentBlock(Block, GetRequest): class Input(BlockSchema): - url: str # The URL to scrape + url: str = SchemaField(description="The URL to scrape the content from") raw_content: bool = SchemaField( default=False, title="Raw Content", @@ -99,8 +103,10 @@ class Input(BlockSchema): ) class Output(BlockSchema): - content: str # The scraped content from the URL - error: str + content: str = SchemaField(description="The scraped content from the given URL") + error: str = SchemaField( + description="Error message if the content cannot be retrieved" + ) def __init__(self): super().__init__( @@ -126,15 +132,26 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class GetWeatherInformationBlock(Block, GetRequest): class Input(BlockSchema): - location: str + location: str = SchemaField( + description="Location to get weather information for" + ) api_key: BlockSecret = SecretField(key="openweathermap_api_key") - use_celsius: bool = True + use_celsius: bool = SchemaField( + default=True, + description="Whether to use Celsius or Fahrenheit for temperature", + ) class Output(BlockSchema): - temperature: str - humidity: str - condition: str - error: str + temperature: str = SchemaField( + description="Temperature in the specified location" + ) + humidity: str = SchemaField(description="Humidity in the specified location") + condition: str = SchemaField( + description="Weather condition in the specified location" + ) + error: str = SchemaField( + description="Error message if the weather information cannot be retrieved" + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/talking_head.py b/autogpt_platform/backend/backend/blocks/talking_head.py index e93b69ed8547..f4497d85ffab 100644 --- a/autogpt_platform/backend/backend/blocks/talking_head.py +++ b/autogpt_platform/backend/backend/blocks/talking_head.py @@ -13,7 +13,8 @@ class Input(BlockSchema): key="did_api_key", description="D-ID API Key" ) script_input: str = SchemaField( - description="The text input for the script", default="Welcome to AutoGPT" + description="The text input for the script", + placeholder="Welcome to AutoGPT", ) provider: Literal["microsoft", "elevenlabs", "amazon"] = SchemaField( description="The voice provider to use", default="microsoft" diff --git a/autogpt_platform/backend/backend/blocks/text.py b/autogpt_platform/backend/backend/blocks/text.py index da287b94fa5f..1d8c050fa147 100644 --- a/autogpt_platform/backend/backend/blocks/text.py +++ b/autogpt_platform/backend/backend/blocks/text.py @@ -2,9 +2,9 @@ from typing import Any from jinja2 import BaseLoader, Environment -from pydantic import Field from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField from backend.util import json jinja = Environment(loader=BaseLoader()) @@ -12,15 +12,17 @@ class MatchTextPatternBlock(Block): class Input(BlockSchema): - text: Any = Field(description="Text to match") - match: str = Field(description="Pattern (Regex) to match") - data: Any = Field(description="Data to be forwarded to output") - case_sensitive: bool = Field(description="Case sensitive match", default=True) - dot_all: bool = Field(description="Dot matches all", default=True) + text: Any = SchemaField(description="Text to match") + match: str = SchemaField(description="Pattern (Regex) to match") + data: Any = SchemaField(description="Data to be forwarded to output") + case_sensitive: bool 
= SchemaField( + description="Case sensitive match", default=True + ) + dot_all: bool = SchemaField(description="Dot matches all", default=True) class Output(BlockSchema): - positive: Any = Field(description="Output data if match is found") - negative: Any = Field(description="Output data if match is not found") + positive: Any = SchemaField(description="Output data if match is found") + negative: Any = SchemaField(description="Output data if match is not found") def __init__(self): super().__init__( @@ -64,15 +66,17 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class ExtractTextInformationBlock(Block): class Input(BlockSchema): - text: Any = Field(description="Text to parse") - pattern: str = Field(description="Pattern (Regex) to parse") - group: int = Field(description="Group number to extract", default=0) - case_sensitive: bool = Field(description="Case sensitive match", default=True) - dot_all: bool = Field(description="Dot matches all", default=True) + text: Any = SchemaField(description="Text to parse") + pattern: str = SchemaField(description="Pattern (Regex) to parse") + group: int = SchemaField(description="Group number to extract", default=0) + case_sensitive: bool = SchemaField( + description="Case sensitive match", default=True + ) + dot_all: bool = SchemaField(description="Dot matches all", default=True) class Output(BlockSchema): - positive: str = Field(description="Extracted text") - negative: str = Field(description="Original text") + positive: str = SchemaField(description="Extracted text") + negative: str = SchemaField(description="Original text") def __init__(self): super().__init__( @@ -116,11 +120,15 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class FillTextTemplateBlock(Block): class Input(BlockSchema): - values: dict[str, Any] = Field(description="Values (dict) to be used in format") - format: str = Field(description="Template to format the text using `values`") + values: dict[str, Any] = SchemaField( + description="Values (dict) to be used in format" + ) + format: str = SchemaField( + description="Template to format the text using `values`" + ) class Output(BlockSchema): - output: str + output: str = SchemaField(description="Formatted text") def __init__(self): super().__init__( @@ -155,11 +163,13 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class CombineTextsBlock(Block): class Input(BlockSchema): - input: list[str] = Field(description="text input to combine") - delimiter: str = Field(description="Delimiter to combine texts", default="") + input: list[str] = SchemaField(description="text input to combine") + delimiter: str = SchemaField( + description="Delimiter to combine texts", default="" + ) class Output(BlockSchema): - output: str = Field(description="Combined text") + output: str = SchemaField(description="Combined text") def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/time_blocks.py b/autogpt_platform/backend/backend/blocks/time_blocks.py index 9e95d428b9cc..eb886b5352c8 100644 --- a/autogpt_platform/backend/backend/blocks/time_blocks.py +++ b/autogpt_platform/backend/backend/blocks/time_blocks.py @@ -3,14 +3,22 @@ from typing import Any, Union from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField class GetCurrentTimeBlock(Block): class Input(BlockSchema): - trigger: str + trigger: str = SchemaField( + description="Trigger any data to output the current time" + ) + format: str = SchemaField( + 
description="Format of the time to output", default="%H:%M:%S" + ) class Output(BlockSchema): - time: str + time: str = SchemaField( + description="Current time in the specified format (default: %H:%M:%S)" + ) def __init__(self): super().__init__( @@ -20,25 +28,38 @@ def __init__(self): input_schema=GetCurrentTimeBlock.Input, output_schema=GetCurrentTimeBlock.Output, test_input=[ - {"trigger": "Hello", "format": "{time}"}, + {"trigger": "Hello"}, + {"trigger": "Hello", "format": "%H:%M"}, ], test_output=[ ("time", lambda _: time.strftime("%H:%M:%S")), + ("time", lambda _: time.strftime("%H:%M")), ], ) def run(self, input_data: Input, **kwargs) -> BlockOutput: - current_time = time.strftime("%H:%M:%S") + current_time = time.strftime(input_data.format) yield "time", current_time class GetCurrentDateBlock(Block): class Input(BlockSchema): - trigger: str - offset: Union[int, str] + trigger: str = SchemaField( + description="Trigger any data to output the current date" + ) + offset: Union[int, str] = SchemaField( + title="Days Offset", + description="Offset in days from the current date", + default=0, + ) + format: str = SchemaField( + description="Format of the date to output", default="%Y-%m-%d" + ) class Output(BlockSchema): - date: str + date: str = SchemaField( + description="Current date in the specified format (default: YYYY-MM-DD)" + ) def __init__(self): super().__init__( @@ -48,7 +69,8 @@ def __init__(self): input_schema=GetCurrentDateBlock.Input, output_schema=GetCurrentDateBlock.Output, test_input=[ - {"trigger": "Hello", "format": "{date}", "offset": "7"}, + {"trigger": "Hello", "offset": "7"}, + {"trigger": "Hello", "offset": "7", "format": "%m/%d/%Y"}, ], test_output=[ ( @@ -56,6 +78,12 @@ def __init__(self): lambda t: abs(datetime.now() - datetime.strptime(t, "%Y-%m-%d")) < timedelta(days=8), # 7 days difference + 1 day error margin. ), + ( + "date", + lambda t: abs(datetime.now() - datetime.strptime(t, "%m/%d/%Y")) + < timedelta(days=8), + # 7 days difference + 1 day error margin. 
+ ), ], ) @@ -65,15 +93,23 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: except ValueError: offset = 0 current_date = datetime.now() - timedelta(days=offset) - yield "date", current_date.strftime("%Y-%m-%d") + yield "date", current_date.strftime(input_data.format) class GetCurrentDateAndTimeBlock(Block): class Input(BlockSchema): - trigger: str + trigger: str = SchemaField( + description="Trigger any data to output the current date and time" + ) + format: str = SchemaField( + description="Format of the date and time to output", + default="%Y-%m-%d %H:%M:%S", + ) class Output(BlockSchema): - date_time: str + date_time: str = SchemaField( + description="Current date and time in the specified format (default: YYYY-MM-DD HH:MM:SS)" + ) def __init__(self): super().__init__( @@ -83,7 +119,7 @@ def __init__(self): input_schema=GetCurrentDateAndTimeBlock.Input, output_schema=GetCurrentDateAndTimeBlock.Output, test_input=[ - {"trigger": "Hello", "format": "{date_time}"}, + {"trigger": "Hello"}, ], test_output=[ ( @@ -97,20 +133,29 @@ def __init__(self): ) def run(self, input_data: Input, **kwargs) -> BlockOutput: - current_date_time = time.strftime("%Y-%m-%d %H:%M:%S") + current_date_time = time.strftime(input_data.format) yield "date_time", current_date_time class CountdownTimerBlock(Block): class Input(BlockSchema): - input_message: Any = "timer finished" - seconds: Union[int, str] = 0 - minutes: Union[int, str] = 0 - hours: Union[int, str] = 0 - days: Union[int, str] = 0 + input_message: Any = SchemaField( + description="Message to output after the timer finishes", + default="timer finished", + ) + seconds: Union[int, str] = SchemaField( + description="Duration in seconds", default=0 + ) + minutes: Union[int, str] = SchemaField( + description="Duration in minutes", default=0 + ) + hours: Union[int, str] = SchemaField(description="Duration in hours", default=0) + days: Union[int, str] = SchemaField(description="Duration in days", default=0) class Output(BlockSchema): - output_message: str + output_message: str = SchemaField( + description="Message after the timer finishes" + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/youtube.py b/autogpt_platform/backend/backend/blocks/youtube.py index cec50109bd4d..b4f0259d98b1 100644 --- a/autogpt_platform/backend/backend/blocks/youtube.py +++ b/autogpt_platform/backend/backend/blocks/youtube.py @@ -7,9 +7,10 @@ from backend.data.model import SchemaField -class TranscribeYouTubeVideoBlock(Block): +class TranscribeYoutubeVideoBlock(Block): class Input(BlockSchema): youtube_url: str = SchemaField( + title="YouTube URL", description="The URL of the YouTube video to transcribe", placeholder="https://www.youtube.com/watch?v=dQw4w9WgXcQ", ) @@ -24,8 +25,8 @@ class Output(BlockSchema): def __init__(self): super().__init__( id="f3a8f7e1-4b1d-4e5f-9f2a-7c3d5a2e6b4c", - input_schema=TranscribeYouTubeVideoBlock.Input, - output_schema=TranscribeYouTubeVideoBlock.Output, + input_schema=TranscribeYoutubeVideoBlock.Input, + output_schema=TranscribeYoutubeVideoBlock.Output, description="Transcribes a YouTube video.", categories={BlockCategory.SOCIAL}, test_input={"youtube_url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"}, diff --git a/autogpt_platform/backend/backend/data/block.py b/autogpt_platform/backend/backend/data/block.py index 594fd10e7681..e89013b3b3f5 100644 --- a/autogpt_platform/backend/backend/data/block.py +++ b/autogpt_platform/backend/backend/data/block.py @@ -230,6 +230,11 @@ def __init__( 
self.disabled = disabled self.static_output = static_output self.block_type = block_type + self.execution_stats = {} + + @classmethod + def create(cls: Type["Block"]) -> "Block": + return cls() @abstractmethod def run(self, input_data: BlockSchemaInputType, **kwargs) -> BlockOutput: @@ -244,6 +249,26 @@ def run(self, input_data: BlockSchemaInputType, **kwargs) -> BlockOutput: """ pass + def run_once(self, input_data: BlockSchemaInputType, output: str, **kwargs) -> Any: + for name, data in self.run(input_data, **kwargs): + if name == output: + return data + raise ValueError(f"{self.name} did not produce any output for {output}") + + def merge_stats(self, stats: dict[str, Any]) -> dict[str, Any]: + for key, value in stats.items(): + if isinstance(value, dict): + self.execution_stats.setdefault(key, {}).update(value) + elif isinstance(value, (int, float)): + self.execution_stats.setdefault(key, 0) + self.execution_stats[key] += value + elif isinstance(value, list): + self.execution_stats.setdefault(key, []) + self.execution_stats[key].extend(value) + else: + self.execution_stats[key] = value + return self.execution_stats + @property def name(self): return self.__class__.__name__ @@ -282,14 +307,15 @@ def execute(self, input_data: BlockInput, **kwargs) -> BlockOutput: # ======================= Block Helper Functions ======================= # -def get_blocks() -> dict[str, Block]: +def get_blocks() -> dict[str, Type[Block]]: from backend.blocks import AVAILABLE_BLOCKS # noqa: E402 return AVAILABLE_BLOCKS async def initialize_blocks() -> None: - for block in get_blocks().values(): + for cls in get_blocks().values(): + block = cls() existing_block = await AgentBlock.prisma().find_first( where={"OR": [{"id": block.id}, {"name": block.name}]} ) @@ -324,4 +350,5 @@ async def initialize_blocks() -> None: def get_block(block_id: str) -> Block | None: - return get_blocks().get(block_id) + cls = get_blocks().get(block_id) + return cls() if cls else None diff --git a/autogpt_platform/backend/backend/data/credit.py b/autogpt_platform/backend/backend/data/credit.py index e1fccb42897f..5581a7854226 100644 --- a/autogpt_platform/backend/backend/data/credit.py +++ b/autogpt_platform/backend/backend/data/credit.py @@ -17,6 +17,7 @@ AITextSummarizerBlock, LlmModel, ) +from backend.blocks.search import ExtractWebsiteContentBlock, SearchTheWebBlock from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock from backend.data.block import Block, BlockInput, get_block from backend.util.settings import Config @@ -74,6 +75,10 @@ def __init__( CreateTalkingAvatarVideoBlock: [ BlockCost(cost_amount=15, cost_filter={"api_key": None}) ], + SearchTheWebBlock: [BlockCost(cost_amount=1)], + ExtractWebsiteContentBlock: [ + BlockCost(cost_amount=1, cost_filter={"raw_content": False}) + ], } diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index 4f1be1de1ed8..b4f8f8aeb739 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -257,7 +257,7 @@ def is_input_output_block(nid: str) -> bool: block = get_block(node.block_id) if not block: - blocks = {v.id: v.name for v in get_blocks().values()} + blocks = {v().id: v().name for v in get_blocks().values()} raise ValueError( f"{suffix}, {node.block_id} is invalid block id, available blocks: {blocks}" ) diff --git a/autogpt_platform/backend/backend/data/user.py b/autogpt_platform/backend/backend/data/user.py index db60eea235cb..477b3bae6526 100644 --- 
a/autogpt_platform/backend/backend/data/user.py +++ b/autogpt_platform/backend/backend/data/user.py @@ -1,6 +1,8 @@ from typing import Optional +from autogpt_libs.supabase_integration_credentials_store.types import UserMetadataRaw from fastapi import HTTPException +from prisma import Json from prisma.models import User from backend.data.db import prisma @@ -35,16 +37,32 @@ async def get_user_by_id(user_id: str) -> Optional[User]: return User.model_validate(user) if user else None -async def create_default_user(enable_auth: str) -> Optional[User]: - if not enable_auth.lower() == "true": - user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID}) - if not user: - user = await prisma.user.create( - data={ - "id": DEFAULT_USER_ID, - "email": "default@example.com", - "name": "Default User", - } - ) - return User.model_validate(user) - return None +async def create_default_user() -> Optional[User]: + user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID}) + if not user: + user = await prisma.user.create( + data={ + "id": DEFAULT_USER_ID, + "email": "default@example.com", + "name": "Default User", + } + ) + return User.model_validate(user) + + +async def get_user_metadata(user_id: str) -> UserMetadataRaw: + user = await User.prisma().find_unique_or_raise( + where={"id": user_id}, + ) + return ( + UserMetadataRaw.model_validate(user.metadata) + if user.metadata + else UserMetadataRaw() + ) + + +async def update_user_metadata(user_id: str, metadata: UserMetadataRaw): + await User.prisma().update( + where={"id": user_id}, + data={"metadata": Json(metadata.model_dump())}, + ) diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index 8257d5c8c07c..b404ac6ecf17 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ b/autogpt_platform/backend/backend/executor/database.py @@ -16,6 +16,7 @@ ) from backend.data.graph import get_graph, get_node from backend.data.queue import RedisEventQueue +from backend.data.user import get_user_metadata, update_user_metadata from backend.util.service import AppService, expose from backend.util.settings import Config @@ -26,11 +27,15 @@ class DatabaseManager(AppService): def __init__(self): - super().__init__(port=Config().database_api_port) + super().__init__() self.use_db = True self.use_redis = True self.event_queue = RedisEventQueue() + @classmethod + def get_port(cls) -> int: + return Config().database_api_port + @expose def send_execution_update(self, execution_result_dict: dict[Any, Any]): self.event_queue.put(ExecutionResult(**execution_result_dict)) @@ -73,3 +78,7 @@ def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R: Callable[[Any, str, int, str, dict[str, str], float, float], int], exposed_run_and_wait(user_credit_model.spend_credits), ) + + # User + User Metadata + get_user_metadata = exposed_run_and_wait(get_user_metadata) + update_user_metadata = exposed_run_and_wait(update_user_metadata) diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index d756dfc6ecf8..102de62c1698 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -16,6 +16,8 @@ if TYPE_CHECKING: from backend.executor import DatabaseManager +from autogpt_libs.utils.cache import thread_cached + from backend.data import redis from backend.data.block import Block, BlockData, BlockInput, BlockType, get_block from backend.data.execution import ( @@ 
-31,7 +33,6 @@ from backend.data.model import CREDENTIALS_FIELD_NAME, CredentialsMetaInput from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.util import json -from backend.util.cache import thread_cached_property from backend.util.decorator import error_logged, time_measured from backend.util.logging import configure_logging from backend.util.process import set_service_name @@ -104,6 +105,7 @@ def execute_node( Args: db_client: The client to send execution updates to the server. + creds_manager: The manager to acquire and release credentials. data: The execution data for executing the current node. execution_stats: The execution statistics to be updated. @@ -153,18 +155,19 @@ def update_execution(status: ExecutionStatus) -> ExecutionResult: # changes during execution. ⚠️ This means a set of credentials can only be used by # one (running) block at a time; simultaneous execution of blocks using same # credentials is not supported. - credentials = creds_lock = None + creds_lock = None if CREDENTIALS_FIELD_NAME in input_data: credentials_meta = CredentialsMetaInput(**input_data[CREDENTIALS_FIELD_NAME]) credentials, creds_lock = creds_manager.acquire(user_id, credentials_meta.id) extra_exec_kwargs["credentials"] = credentials output_size = 0 - try: - credit = db_client.get_or_refill_credit(user_id) - if credit < 0: - raise ValueError(f"Insufficient credit: {credit}") + end_status = ExecutionStatus.COMPLETED + credit = db_client.get_or_refill_credit(user_id) + if credit < 0: + raise ValueError(f"Insufficient credit: {credit}") + try: for output_name, output_data in node_block.execute( input_data, **extra_exec_kwargs ): @@ -183,32 +186,46 @@ def update_execution(status: ExecutionStatus) -> ExecutionResult: ): yield execution - # Release lock on credentials ASAP - if creds_lock: - creds_lock.release() - - r = update_execution(ExecutionStatus.COMPLETED) - s = input_size + output_size - t = ( - (r.end_time - r.start_time).total_seconds() - if r.end_time and r.start_time - else 0 - ) - db_client.spend_credits(user_id, credit, node_block.id, input_data, s, t) - except Exception as e: + end_status = ExecutionStatus.FAILED error_msg = str(e) log_metadata.exception(f"Node execution failed with error {error_msg}") db_client.upsert_execution_output(node_exec_id, "error", error_msg) - update_execution(ExecutionStatus.FAILED) - raise e + for execution in _enqueue_next_nodes( + db_client=db_client, + node=node, + output=("error", error_msg), + user_id=user_id, + graph_exec_id=graph_exec_id, + graph_id=graph_id, + log_metadata=log_metadata, + ): + yield execution + raise e finally: # Ensure credentials are released even if execution fails if creds_lock: - creds_lock.release() + try: + creds_lock.release() + except Exception as e: + log_metadata.error(f"Failed to release credentials lock: {e}") + + # Update execution status and spend credits + res = update_execution(end_status) + if end_status == ExecutionStatus.COMPLETED: + s = input_size + output_size + t = ( + (res.end_time - res.start_time).total_seconds() + if res.end_time and res.start_time + else 0 + ) + db_client.spend_credits(user_id, credit, node_block.id, input_data, s, t) + + # Update execution stats if execution_stats is not None: + execution_stats.update(node_block.execution_stats) execution_stats["input_size"] = input_size execution_stats["output_size"] = output_size @@ -657,20 +674,24 @@ def callback(_): class ExecutionManager(AppService): def __init__(self): - 
super().__init__(port=settings.config.execution_manager_port) + super().__init__() self.use_redis = True self.use_supabase = True self.pool_size = settings.config.num_graph_workers self.queue = ExecutionQueue[GraphExecution]() self.active_graph_runs: dict[str, tuple[Future, threading.Event]] = {} + @classmethod + def get_port(cls) -> int: + return settings.config.execution_manager_port + def run_service(self): from autogpt_libs.supabase_integration_credentials_store import ( SupabaseIntegrationCredentialsStore, ) self.credentials_store = SupabaseIntegrationCredentialsStore( - self.supabase, redis.get_redis() + redis=redis.get_redis() ) self.executor = ProcessPoolExecutor( max_workers=self.pool_size, @@ -701,7 +722,7 @@ def cleanup(self): super().cleanup() - @thread_cached_property + @property def db_client(self) -> "DatabaseManager": return get_db_client() @@ -857,10 +878,11 @@ def _validate_node_input_credentials(self, graph: Graph, user_id: str): # ------- UTILITIES ------- # +@thread_cached def get_db_client() -> "DatabaseManager": from backend.executor import DatabaseManager - return get_service_client(DatabaseManager, settings.config.database_api_port) + return get_service_client(DatabaseManager) @contextmanager diff --git a/autogpt_platform/backend/backend/executor/scheduler.py b/autogpt_platform/backend/backend/executor/scheduler.py index 574765c34845..979631c0585e 100644 --- a/autogpt_platform/backend/backend/executor/scheduler.py +++ b/autogpt_platform/backend/backend/executor/scheduler.py @@ -4,6 +4,7 @@ from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.triggers.cron import CronTrigger +from autogpt_libs.utils.cache import thread_cached_property from backend.data.block import BlockInput from backend.data.schedule import ( @@ -14,7 +15,6 @@ update_schedule, ) from backend.executor.manager import ExecutionManager -from backend.util.cache import thread_cached_property from backend.util.service import AppService, expose, get_service_client from backend.util.settings import Config @@ -28,14 +28,18 @@ def log(msg, **kwargs): class ExecutionScheduler(AppService): def __init__(self, refresh_interval=10): - super().__init__(port=Config().execution_scheduler_port) + super().__init__() self.use_db = True self.last_check = datetime.min self.refresh_interval = refresh_interval + @classmethod + def get_port(cls) -> int: + return Config().execution_scheduler_port + @thread_cached_property def execution_client(self) -> ExecutionManager: - return get_service_client(ExecutionManager, Config().execution_manager_port) + return get_service_client(ExecutionManager) def run_service(self): scheduler = BackgroundScheduler() diff --git a/autogpt_platform/backend/backend/integrations/creds_manager.py b/autogpt_platform/backend/backend/integrations/creds_manager.py index 6fcee8eecfee..96f9d1a3c56d 100644 --- a/autogpt_platform/backend/backend/integrations/creds_manager.py +++ b/autogpt_platform/backend/backend/integrations/creds_manager.py @@ -13,8 +13,6 @@ from backend.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler from backend.util.settings import Settings -from ..server.integrations.utils import get_supabase - logger = logging.getLogger(__name__) settings = Settings() @@ -54,7 +52,7 @@ class IntegrationCredentialsManager: def __init__(self): redis_conn = redis.get_redis() self._locks = RedisKeyedMutex(redis_conn) - self.store = SupabaseIntegrationCredentialsStore(get_supabase(), redis_conn) + self.store = SupabaseIntegrationCredentialsStore(redis=redis_conn) 
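A note on the service refactor above: the listen port moves out of `AppService.__init__` and into a per-class `get_port()` classmethod, so `get_service_client()` can derive the Pyro URI on its own instead of having every caller thread port numbers through. A minimal sketch of the pattern, assuming a hard-coded port and `localhost` in place of the real `Config()` lookups:

from abc import ABC, abstractmethod


class AppService(ABC):
    @classmethod
    @abstractmethod
    def get_port(cls) -> int:
        """Port this service's Pyro daemon listens on."""


class DatabaseManager(AppService):
    @classmethod
    def get_port(cls) -> int:
        return 8005  # Config().database_api_port in the real code


def service_uri(service_type: type[AppService]) -> str:
    # get_service_client() builds a Pyro5 proxy from a URI shaped like this;
    # the real code uses service_type.service_name rather than __name__.
    return f"PYRO:{service_type.__name__}@localhost:{service_type.get_port()}"


assert service_uri(DatabaseManager) == "PYRO:DatabaseManager@localhost:8005"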
def create(self, user_id: str, credentials: Credentials) -> None: return self.store.add_creds(user_id, credentials) @@ -131,7 +129,7 @@ def delete(self, user_id: str, credentials_id: str) -> None: def _acquire_lock(self, user_id: str, credentials_id: str, *args: str) -> RedisLock: key = ( - self.store.supabase.supabase_url, + self.store.db_manager, f"user:{user_id}", f"credentials:{credentials_id}", *args, diff --git a/autogpt_platform/backend/backend/server/integrations/router.py b/autogpt_platform/backend/backend/server/integrations/router.py index a44954b37157..5163de0b2fa3 100644 --- a/autogpt_platform/backend/backend/server/integrations/router.py +++ b/autogpt_platform/backend/backend/server/integrations/router.py @@ -19,6 +19,7 @@ logger = logging.getLogger(__name__) settings = Settings() router = APIRouter() + creds_manager = IntegrationCredentialsManager() @@ -41,7 +42,7 @@ async def login( requested_scopes = scopes.split(",") if scopes else [] # Generate and store a secure random state token along with the scopes - state_token = await creds_manager.store.store_state_token( + state_token = creds_manager.store.store_state_token( user_id, provider, requested_scopes ) @@ -70,12 +71,12 @@ async def callback( handler = _get_provider_oauth_handler(request, provider) # Verify the state token - if not await creds_manager.store.verify_state_token(user_id, state_token, provider): + if not creds_manager.store.verify_state_token(user_id, state_token, provider): logger.warning(f"Invalid or expired state token for user {user_id}") raise HTTPException(status_code=400, detail="Invalid or expired state token") try: - scopes = await creds_manager.store.get_any_valid_scopes_from_state_token( + scopes = creds_manager.store.get_any_valid_scopes_from_state_token( user_id, state_token, provider ) logger.debug(f"Retrieved scopes from state token: {scopes}") diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py index 880d41817f21..f0d922c19686 100644 --- a/autogpt_platform/backend/backend/server/rest_api.py +++ b/autogpt_platform/backend/backend/server/rest_api.py @@ -7,6 +7,7 @@ import uvicorn from autogpt_libs.auth.middleware import auth_middleware +from autogpt_libs.utils.cache import thread_cached_property from fastapi import APIRouter, Body, Depends, FastAPI, HTTPException, Request from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import JSONResponse @@ -19,9 +20,7 @@ from backend.data.credit import get_block_costs, get_user_credit_model from backend.data.user import get_or_create_user from backend.executor import ExecutionManager, ExecutionScheduler -from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.server.model import CreateGraph, SetGraphActiveVersion -from backend.util.cache import thread_cached_property from backend.util.service import AppService, get_service_client from backend.util.settings import AppEnvironment, Config, Settings @@ -36,9 +35,13 @@ class AgentServer(AppService): _user_credit_model = get_user_credit_model() def __init__(self): - super().__init__(port=Config().agent_server_port) + super().__init__() self.use_redis = True + @classmethod + def get_port(cls) -> int: + return Config().agent_server_port + @asynccontextmanager async def lifespan(self, _: FastAPI): await db.connect() @@ -97,7 +100,6 @@ def run_service(self): tags=["integrations"], dependencies=[Depends(auth_middleware)], ) - self.integration_creds_manager = IntegrationCredentialsManager() 
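The `merge_stats` helper added to `Block` earlier in this diff is what lets the LLM blocks roll token and retry counts up through nested block calls. A standalone restatement of its merge rules (numbers accumulate, dicts and lists merge, anything else is overwritten); the real method mutates `self.execution_stats` rather than taking the dict as an argument:

from typing import Any


def merge_stats(stats: dict[str, Any], new: dict[str, Any]) -> dict[str, Any]:
    for key, value in new.items():
        if isinstance(value, dict):
            stats.setdefault(key, {}).update(value)
        elif isinstance(value, (int, float)):
            stats[key] = stats.get(key, 0) + value
        elif isinstance(value, list):
            stats.setdefault(key, []).extend(value)
        else:
            stats[key] = value
    return stats


stats: dict[str, Any] = {}
merge_stats(stats, {"input_token_count": 10, "llm_call_count": 1})
merge_stats(stats, {"input_token_count": 5, "llm_call_count": 1})
assert stats == {"input_token_count": 15, "llm_call_count": 2}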
api_router.include_router( backend.server.routers.analytics.router, @@ -307,11 +309,11 @@ async def wrapper(*args, **kwargs): @thread_cached_property def execution_manager_client(self) -> ExecutionManager: - return get_service_client(ExecutionManager, Config().execution_manager_port) + return get_service_client(ExecutionManager) @thread_cached_property def execution_scheduler_client(self) -> ExecutionScheduler: - return get_service_client(ExecutionScheduler, Config().execution_scheduler_port) + return get_service_client(ExecutionScheduler) @classmethod def handle_internal_http_error(cls, request: Request, exc: Exception): @@ -330,9 +332,9 @@ async def get_or_create_user_route(cls, user_data: dict = Depends(auth_middlewar @classmethod def get_graph_blocks(cls) -> list[dict[Any, Any]]: - blocks = block.get_blocks() + blocks = [cls() for cls in block.get_blocks().values()] costs = get_block_costs() - return [{**b.to_dict(), "costs": costs.get(b.id, [])} for b in blocks.values()] + return [{**b.to_dict(), "costs": costs.get(b.id, [])} for b in blocks] @classmethod def execute_graph_block( diff --git a/autogpt_platform/backend/backend/server/ws_api.py b/autogpt_platform/backend/backend/server/ws_api.py index 5e35693be444..8cbda22e1ed1 100644 --- a/autogpt_platform/backend/backend/server/ws_api.py +++ b/autogpt_platform/backend/backend/server/ws_api.py @@ -28,7 +28,7 @@ async def lifespan(app: FastAPI): docs_url = "/docs" if settings.config.app_env == AppEnvironment.LOCAL else None -app = FastAPI(lifespan=lifespan) +app = FastAPI(lifespan=lifespan, docs_url=docs_url) _connection_manager = None logger.info(f"CORS allow origins: {settings.config.backend_cors_allow_origins}") @@ -66,7 +66,7 @@ async def event_broadcaster(manager: ConnectionManager): async def authenticate_websocket(websocket: WebSocket) -> str: - if settings.config.enable_auth.lower() == "true": + if settings.config.enable_auth: token = websocket.query_params.get("token") if not token: await websocket.close(code=4001, reason="Missing authentication token") diff --git a/autogpt_platform/backend/backend/util/cache.py b/autogpt_platform/backend/backend/util/cache.py deleted file mode 100644 index 30048e8781af..000000000000 --- a/autogpt_platform/backend/backend/util/cache.py +++ /dev/null @@ -1,21 +0,0 @@ -import threading -from functools import wraps -from typing import Callable, TypeVar - -T = TypeVar("T") -R = TypeVar("R") - - -def thread_cached_property(func: Callable[[T], R]) -> property: - local_cache = threading.local() - - @wraps(func) - def wrapper(self: T) -> R: - if not hasattr(local_cache, "cache"): - local_cache.cache = {} - key = id(self) - if key not in local_cache.cache: - local_cache.cache[key] = func(self) - return local_cache.cache[key] - - return property(wrapper) diff --git a/autogpt_platform/backend/backend/util/service.py b/autogpt_platform/backend/backend/util/service.py index 3c4532b5ad00..5c01530ec158 100644 --- a/autogpt_platform/backend/backend/util/service.py +++ b/autogpt_platform/backend/backend/util/service.py @@ -5,6 +5,7 @@ import threading import time import typing +from abc import ABC, abstractmethod from enum import Enum from types import NoneType, UnionType from typing import ( @@ -99,16 +100,24 @@ def custom_dict_to_class(qualname, data: dict): return custom_dict_to_class -class AppService(AppProcess): +class AppService(AppProcess, ABC): shared_event_loop: asyncio.AbstractEventLoop use_db: bool = False use_redis: bool = False use_supabase: bool = False - def __init__(self, port): - self.port = 
port + def __init__(self): self.uri = None + @classmethod + @abstractmethod + def get_port(cls) -> int: + pass + + @classmethod + def get_host(cls) -> str: + return os.environ.get(f"{cls.service_name.upper()}_HOST", Config().pyro_host) + def run_service(self) -> None: while True: time.sleep(10) @@ -158,7 +167,7 @@ def cleanup(self): @conn_retry("Pyro", "Starting Pyro Service") def __start_pyro(self): host = Config().pyro_host - daemon = Pyro5.api.Daemon(host=host, port=self.port) + daemon = Pyro5.api.Daemon(host=host, port=self.get_port()) self.uri = daemon.register(self, objectId=self.service_name) logger.info(f"[{self.service_name}] Connected to Pyro; URI = {self.uri}") daemon.requestLoop() @@ -167,17 +176,20 @@ def __start_async_loop(self): self.shared_event_loop.run_forever() +# --------- UTILITIES --------- # + + AS = TypeVar("AS", bound=AppService) -def get_service_client(service_type: Type[AS], port: int) -> AS: +def get_service_client(service_type: Type[AS]) -> AS: service_name = service_type.service_name class DynamicClient: @conn_retry("Pyro", f"Connecting to [{service_name}]") def __init__(self): host = os.environ.get(f"{service_name.upper()}_HOST", "localhost") - uri = f"PYRO:{service_type.service_name}@{host}:{port}" + uri = f"PYRO:{service_type.service_name}@{host}:{service_type.get_port()}" logger.debug(f"Connecting to service [{service_name}]. URI = {uri}") self.proxy = Pyro5.api.Proxy(uri) # Attempt to bind to ensure the connection is established @@ -191,8 +203,6 @@ def __getattr__(self, name: str) -> Callable[..., Any]: return cast(AS, DynamicClient()) -# --------- UTILITIES --------- # - builtin_types = [*vars(builtins).values(), NoneType, Enum] diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index 2a9c8c5dd5b8..dee28092aba7 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -69,8 +69,8 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): default="localhost", description="The default hostname of the Pyro server.", ) - enable_auth: str = Field( - default="false", + enable_auth: bool = Field( + default=True, description="If authentication is enabled or not", ) enable_credit: str = Field( @@ -117,11 +117,6 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): description="The port for agent server daemon to run on", ) - database_api_host: str = Field( - default="0.0.0.0", - description="The host for database server API to run on", - ) - database_api_port: int = Field( default=8005, description="The port for database server API to run on", @@ -138,7 +133,7 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): ) frontend_base_url: str = Field( - default="", + default="http://localhost:3000", description="Can be used to explicitly set the base URL for the frontend. 
" "This value is then used to generate redirect URLs for OAuth flows.", ) diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py index 704d6507e081..d1e2d83f7f3f 100644 --- a/autogpt_platform/backend/backend/util/test.py +++ b/autogpt_platform/backend/backend/util/test.py @@ -31,7 +31,7 @@ async def __aenter__(self): await db.connect() await initialize_blocks() - await create_default_user("false") + await create_default_user() return self diff --git a/autogpt_platform/backend/migrations/20241007175111_move_oauth_creds_to_user_obj/migration.sql b/autogpt_platform/backend/migrations/20241007175111_move_oauth_creds_to_user_obj/migration.sql new file mode 100644 index 000000000000..b3886efa030a --- /dev/null +++ b/autogpt_platform/backend/migrations/20241007175111_move_oauth_creds_to_user_obj/migration.sql @@ -0,0 +1,2 @@ +-- AlterTable +ALTER TABLE "User" ADD COLUMN "metadata" JSONB; diff --git a/autogpt_platform/backend/migrations/20241007175112_add_oauth_creds_user_trigger/migration.sql b/autogpt_platform/backend/migrations/20241007175112_add_oauth_creds_user_trigger/migration.sql new file mode 100644 index 000000000000..aa577c90e938 --- /dev/null +++ b/autogpt_platform/backend/migrations/20241007175112_add_oauth_creds_user_trigger/migration.sql @@ -0,0 +1,27 @@ +--CreateFunction +CREATE OR REPLACE FUNCTION add_user_to_platform() RETURNS TRIGGER AS $$ +BEGIN + INSERT INTO platform."User" (id, email, "updatedAt") + VALUES (NEW.id, NEW.email, now()); + RETURN NEW; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; + +DO $$ +BEGIN + -- Check if the auth schema and users table exist + IF EXISTS ( + SELECT 1 + FROM information_schema.tables + WHERE table_schema = 'auth' + AND table_name = 'users' + ) THEN + -- Drop the trigger if it exists + DROP TRIGGER IF EXISTS user_added_to_platform ON auth.users; + + -- Create the trigger + CREATE TRIGGER user_added_to_platform + AFTER INSERT ON auth.users + FOR EACH ROW EXECUTE FUNCTION add_user_to_platform(); + END IF; +END $$; diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock index bbe9f6382caf..4d7a84339125 100644 --- a/autogpt_platform/backend/poetry.lock +++ b/autogpt_platform/backend/poetry.lock @@ -17,13 +17,13 @@ yarl = "*" [[package]] name = "aiohappyeyeballs" -version = "2.4.2" +version = "2.4.3" description = "Happy Eyeballs for asyncio" optional = false python-versions = ">=3.8" files = [ - {file = "aiohappyeyeballs-2.4.2-py3-none-any.whl", hash = "sha256:8522691d9a154ba1145b157d6d5c15e5c692527ce6a53c5e5f9876977f6dab2f"}, - {file = "aiohappyeyeballs-2.4.2.tar.gz", hash = "sha256:4ca893e6c5c1f5bf3888b04cb5a3bee24995398efef6e0b9f747b5e89d84fd74"}, + {file = "aiohappyeyeballs-2.4.3-py3-none-any.whl", hash = "sha256:8a7a83727b2756f394ab2895ea0765a0a8c475e3c71e98d43d76f22b4b435572"}, + {file = "aiohappyeyeballs-2.4.3.tar.gz", hash = "sha256:75cf88a15106a5002a8eb1dab212525c00d1f4c0fa96e551c9fbe6f09a621586"}, ] [[package]] @@ -1841,6 +1841,57 @@ files = [ {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, ] +[[package]] +name = "pinecone" +version = "5.3.1" +description = "Pinecone client and SDK" +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "pinecone-5.3.1-py3-none-any.whl", hash = "sha256:dd180963d29cd648f2d58becf18b21f150362aef80446dd3a7ed15cbe85bb4c7"}, + {file = "pinecone-5.3.1.tar.gz", hash = "sha256:a216630331753958f4ebcdc6e6d473402d17152f2194af3e19b3416c73b0dcc4"}, +] + 
+[package.dependencies] +certifi = ">=2019.11.17" +pinecone-plugin-inference = ">=1.1.0,<2.0.0" +pinecone-plugin-interface = ">=0.0.7,<0.0.8" +python-dateutil = ">=2.5.3" +tqdm = ">=4.64.1" +typing-extensions = ">=3.7.4" +urllib3 = [ + {version = ">=1.26.0", markers = "python_version >= \"3.8\" and python_version < \"3.12\""}, + {version = ">=1.26.5", markers = "python_version >= \"3.12\" and python_version < \"4.0\""}, +] + +[package.extras] +grpc = ["googleapis-common-protos (>=1.53.0)", "grpcio (>=1.44.0)", "grpcio (>=1.59.0)", "lz4 (>=3.1.3)", "protobuf (>=4.25,<5.0)", "protoc-gen-openapiv2 (>=0.0.1,<0.0.2)"] + +[[package]] +name = "pinecone-plugin-inference" +version = "1.1.0" +description = "Embeddings plugin for Pinecone SDK" +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "pinecone_plugin_inference-1.1.0-py3-none-any.whl", hash = "sha256:32c61aba21c9a28fdcd0e782204c1ca641aeb3fd6e42764fbf0de8186eb657ec"}, + {file = "pinecone_plugin_inference-1.1.0.tar.gz", hash = "sha256:283e5ae4590b901bf2179beb56fc3d1b715e63582f37ec7abb0708cf70912d1f"}, +] + +[package.dependencies] +pinecone-plugin-interface = ">=0.0.7,<0.0.8" + +[[package]] +name = "pinecone-plugin-interface" +version = "0.0.7" +description = "Plugin interface for the Pinecone python client" +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "pinecone_plugin_interface-0.0.7-py3-none-any.whl", hash = "sha256:875857ad9c9fc8bbc074dbe780d187a2afd21f5bfe0f3b08601924a61ef1bba8"}, + {file = "pinecone_plugin_interface-0.0.7.tar.gz", hash = "sha256:b8e6675e41847333aa13923cc44daa3f85676d7157324682dc1640588a982846"}, +] + [[package]] name = "platformdirs" version = "4.3.6" @@ -2092,8 +2143,8 @@ files = [ annotated-types = ">=0.6.0" pydantic-core = "2.23.4" typing-extensions = [ - {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, {version = ">=4.6.1", markers = "python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, ] [package.extras] @@ -3668,4 +3719,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "0962d61ced1a8154c64c6bbdb3f72aca558831adfbfda68eb66f39b535466f77" +content-hash = "f9293b504ef813f98f43a8c3ab1b779ff9d7dc2e3bd9412fccc6da5102915e6b" diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml index 4cd3637976d0..d2450fc56740 100644 --- a/autogpt_platform/backend/pyproject.toml +++ b/autogpt_platform/backend/pyproject.toml @@ -27,7 +27,7 @@ jsonref = "^1.1.0" jsonschema = "^4.22.0" ollama = "^0.3.0" openai = "^1.35.7" -praw = "^7.7.1" +praw = "~7.7.1" prisma = "^0.13.1" psutil = "^5.9.8" pydantic = "^2.7.2" @@ -45,7 +45,7 @@ websockets = "^12.0" youtube-transcript-api = "^0.6.2" googlemaps = "^4.10.0" replicate = "^0.34.1" - +pinecone = "^5.3.1" [tool.poetry.group.dev.dependencies] poethepoet = "^0.26.1" httpx = "^0.27.0" @@ -55,6 +55,7 @@ ruff = "^0.5.2" pyright = "^1.1.371" isort = "^5.13.2" black = "^24.4.2" +aiohappyeyeballs = "^2.4.3" [build-system] requires = ["poetry-core"] diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index 3fab8dc2593d..b316e226d202 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -17,6 +17,7 @@ model User { name String? createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + metadata Json? 
// Relations AgentGraphs AgentGraph[] diff --git a/autogpt_platform/backend/test/__init__.py b/autogpt_platform/backend/test/__init__.py index e69de29bb2d1..d10438719da5 100644 --- a/autogpt_platform/backend/test/__init__.py +++ b/autogpt_platform/backend/test/__init__.py @@ -0,0 +1,3 @@ +import os + +os.environ["ENABLE_AUTH"] = "false" diff --git a/autogpt_platform/backend/test/block/test_block.py b/autogpt_platform/backend/test/block/test_block.py index be16a0b1a76e..48d2616f613e 100644 --- a/autogpt_platform/backend/test/block/test_block.py +++ b/autogpt_platform/backend/test/block/test_block.py @@ -1,3 +1,5 @@ +from typing import Type + import pytest from backend.data.block import Block, get_blocks @@ -5,5 +7,5 @@ @pytest.mark.parametrize("block", get_blocks().values(), ids=lambda b: b.name) -def test_available_blocks(block: Block): - execute_block_test(type(block)()) +def test_available_blocks(block: Type[Block]): + execute_block_test(block()) diff --git a/autogpt_platform/backend/test/executor/test_scheduler.py b/autogpt_platform/backend/test/executor/test_scheduler.py index c0bcc8307925..49e46510a120 100644 --- a/autogpt_platform/backend/test/executor/test_scheduler.py +++ b/autogpt_platform/backend/test/executor/test_scheduler.py @@ -5,7 +5,6 @@ from backend.server.model import CreateGraph from backend.usecases.sample import create_test_graph, create_test_user from backend.util.service import get_service_client -from backend.util.settings import Config from backend.util.test import SpinTestServer @@ -19,10 +18,7 @@ async def test_agent_schedule(server: SpinTestServer): user_id=test_user.id, ) - scheduler = get_service_client( - ExecutionScheduler, Config().execution_scheduler_port - ) - + scheduler = get_service_client(ExecutionScheduler) schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id) assert len(schedules) == 0 diff --git a/autogpt_platform/backend/test/util/test_service.py b/autogpt_platform/backend/test/util/test_service.py index 458e18b76d3a..a20810dbb1e5 100644 --- a/autogpt_platform/backend/test/util/test_service.py +++ b/autogpt_platform/backend/test/util/test_service.py @@ -5,9 +5,13 @@ TEST_SERVICE_PORT = 8765 -class TestService(AppService): +class ServiceTest(AppService): def __init__(self): - super().__init__(port=TEST_SERVICE_PORT) + super().__init__() + + @classmethod + def get_port(cls) -> int: + return TEST_SERVICE_PORT @expose def add(self, a: int, b: int) -> int: @@ -27,8 +31,8 @@ async def add_async(a: int, b: int) -> int: @pytest.mark.asyncio(scope="session") async def test_service_creation(server): - with TestService(): - client = get_service_client(TestService, TEST_SERVICE_PORT) + with ServiceTest(): + client = get_service_client(ServiceTest) assert client.add(5, 3) == 8 assert client.subtract(10, 4) == 6 assert client.fun_with_async(5, 3) == 8 diff --git a/autogpt_platform/docker-compose.platform.yml b/autogpt_platform/docker-compose.platform.yml index a0b8f670acd6..e2a24fa51fd1 100644 --- a/autogpt_platform/docker-compose.platform.yml +++ b/autogpt_platform/docker-compose.platform.yml @@ -8,7 +8,7 @@ services: develop: watch: - path: ./ - target: autogpt_platform/backend/migrate + target: autogpt_platform/backend/migrations action: rebuild depends_on: db: @@ -66,6 +66,7 @@ services: - ENABLE_AUTH=true - PYRO_HOST=0.0.0.0 - EXECUTIONMANAGER_HOST=executor + - DATABASEMANAGER_HOST=executor - FRONTEND_BASE_URL=http://localhost:3000 - BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"] ports: @@ -103,6 +104,7 @@ services: - 
ENABLE_AUTH=true - PYRO_HOST=0.0.0.0 - AGENTSERVER_HOST=rest_server + - DATABASEMANAGER_HOST=0.0.0.0 ports: - "8002:8000" networks: diff --git a/autogpt_platform/docker-compose.yml b/autogpt_platform/docker-compose.yml index dcde6567f178..a1ae16b3ea63 100644 --- a/autogpt_platform/docker-compose.yml +++ b/autogpt_platform/docker-compose.yml @@ -96,6 +96,36 @@ services: file: ./supabase/docker/docker-compose.yml service: rest + realtime: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: realtime + + storage: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: storage + + imgproxy: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: imgproxy + + meta: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: meta + + functions: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: functions + analytics: <<: *supabase-services extends: @@ -112,3 +142,24 @@ services: extends: file: ./supabase/docker/docker-compose.yml service: vector + + deps: + <<: *supabase-services + profiles: + - local + image: busybox + command: /bin/true + depends_on: + - studio + - kong + - auth + - rest + - realtime + - storage + - imgproxy + - meta + - functions + - analytics + - db + - vector + - redis diff --git a/autogpt_platform/frontend/src/app/login/actions.ts b/autogpt_platform/frontend/src/app/login/actions.ts index ef0bff17ddfe..131fb9de89ed 100644 --- a/autogpt_platform/frontend/src/app/login/actions.ts +++ b/autogpt_platform/frontend/src/app/login/actions.ts @@ -22,6 +22,11 @@ export async function login(values: z.infer) { const { data, error } = await supabase.auth.signInWithPassword(values); if (error) { + if (error.status == 400) { + // Hence User is not present + redirect("/signup"); + } + return error.message; } @@ -33,35 +38,3 @@ export async function login(values: z.infer) { redirect("/"); }); } - -export async function signup(values: z.infer) { - "use server"; - return await Sentry.withServerActionInstrumentation( - "signup", - {}, - async () => { - const supabase = createServerClient(); - - if (!supabase) { - redirect("/error"); - } - - // We are sure that the values are of the correct type because zod validates the form - const { data, error } = await supabase.auth.signUp(values); - - if (error) { - if (error.message.includes("P0001")) { - return "Please join our waitlist for your turn: https://agpt.co/waitlist"; - } - return error.message; - } - - if (data.session) { - await supabase.auth.setSession(data.session); - } - - revalidatePath("/", "layout"); - redirect("/"); - }, - ); -} diff --git a/autogpt_platform/frontend/src/app/login/page.tsx b/autogpt_platform/frontend/src/app/login/page.tsx index 7457f6c7b048..1c2f3c28e8a5 100644 --- a/autogpt_platform/frontend/src/app/login/page.tsx +++ b/autogpt_platform/frontend/src/app/login/page.tsx @@ -98,22 +98,10 @@ export default function LoginPage() { setFeedback(null); }; - const onSignup = async (data: z.infer) => { - if (await form.trigger()) { - setIsLoading(true); - const error = await signup(data); - setIsLoading(false); - if (error) { - setFeedback(error); - return; - } - setFeedback(null); - } - }; - return (
+

Log in to your Account

{/*
- +
+
+ + Create a New Account +

{feedback}
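The reworked login flow above returns an error message string on failure and otherwise redirects, with a Supabase 400 now treated as "user not present" and bounced to `/signup`. A minimal sketch of how a caller consumes that contract; the schema and handler names are illustrative, not from the patch:

```ts
import { z } from "zod";
import { login } from "./actions";

// Illustrative schema; the real one lives alongside the login page.
const loginFormSchema = z.object({
  email: z.string().email(),
  password: z.string().min(6),
});

// On success (or an unknown user) `login` redirects instead of returning,
// so a string only comes back for other failures, e.g. a wrong password.
async function submitLogin(values: z.infer<typeof loginFormSchema>) {
  const errorMessage = await login(values);
  if (errorMessage) {
    console.warn(errorMessage); // the page surfaces this via its feedback state
  }
}
```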

diff --git a/autogpt_platform/frontend/src/app/marketplace/page.tsx b/autogpt_platform/frontend/src/app/marketplace/page.tsx index 232f94c96dc5..7fc67c95ae28 100644 --- a/autogpt_platform/frontend/src/app/marketplace/page.tsx +++ b/autogpt_platform/frontend/src/app/marketplace/page.tsx @@ -118,7 +118,7 @@ const AgentCard: React.FC<{ agent: Agent; featured?: boolean }> = ({ {agent.description}

- Categories: {agent.categories.join(", ")} + Categories: {agent.categories?.join(", ")}
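The `agent.categories?.join(", ")` change above guards against marketplace entries whose `categories` field is missing. A small sketch of the pattern; the `Agent` shape and the fallback label are illustrative:

```ts
type Agent = { name: string; categories?: string[] };

// `?.` short-circuits to undefined instead of throwing a TypeError when
// `categories` is absent; `??` then supplies a display fallback.
const categoryLabel = (agent: Agent): string =>
  agent.categories?.join(", ") ?? "Uncategorized";

categoryLabel({ name: "demo" }); // -> "Uncategorized"
categoryLabel({ name: "demo", categories: ["SEO", "email"] }); // -> "SEO, email"
```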
@@ -315,14 +315,27 @@ const Marketplace: React.FC = () => { ) ) : ( <> - {featuredAgents.length > 0 && ( + {featuredAgents?.length > 0 ? ( + ) : ( +
+

No Featured Agents found

+
+ )} + +
+ + {topAgents?.length > 0 ? ( + + ) : ( +
+

No Top Downloaded Agents found

+
)} - { setSubmitError(null); if (!data.agreeToTerms) { - throw new Error("You must agree to the terms of service"); + throw new Error("You must agree to the terms of use"); } try { @@ -404,7 +404,7 @@ const SubmitPage: React.FC = () => { (
{ className="text-sm font-medium leading-none peer-disabled:cursor-not-allowed peer-disabled:opacity-70" > I agree to the{" "} - - terms of service + + terms of use
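The marketplace hunks above swap bare `length > 0 && (...)` rendering for ternaries with explicit "No ... found" empty states, plus optional chaining so an undefined list cannot throw while data is loading. A compact sketch of that pattern, with `AgentGrid` as an illustrative stand-in for the real featured/top-agents components:

```tsx
type Agent = { name: string };

// Illustrative stand-in for the real section components.
const AgentGrid = ({ agents }: { agents: Agent[] }) => (
  <ul>
    {agents.map((a) => (
      <li key={a.name}>{a.name}</li>
    ))}
  </ul>
);

// A ternary with an explicit empty state replaces `list.length > 0 && ...`,
// which silently renders nothing when the list is empty or still undefined.
const AgentSection = ({ title, agents }: { title: string; agents?: Agent[] }) =>
  agents && agents.length > 0 ? (
    <AgentGrid agents={agents} />
  ) : (
    <div>No {title} found</div>
  );
```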
diff --git a/autogpt_platform/frontend/src/app/signup/actions.ts b/autogpt_platform/frontend/src/app/signup/actions.ts new file mode 100644 index 000000000000..2773b87743e6 --- /dev/null +++ b/autogpt_platform/frontend/src/app/signup/actions.ts @@ -0,0 +1,46 @@ +"use server"; +import { createServerClient } from "@/lib/supabase/server"; +import * as Sentry from "@sentry/nextjs"; +import { revalidatePath } from "next/cache"; +import { redirect } from "next/navigation"; +import { z } from "zod"; + +const SignupFormSchema = z.object({ + email: z.string().email().min(2).max(64), + password: z.string().min(6).max(64), +}); + +export async function signup(values: z.infer) { + "use server"; + return await Sentry.withServerActionInstrumentation( + "signup", + {}, + async () => { + const supabase = createServerClient(); + + if (!supabase) { + redirect("/error"); + } + + // We are sure that the values are of the correct type because zod validates the form + const { data, error } = await supabase.auth.signUp(values); + + if (error) { + if (error.message.includes("P0001")) { + return "Please join our waitlist for your turn: https://agpt.co/waitlist"; + } + if (error.code?.includes("user_already_exists")) { + redirect("/login"); + } + return error.message; + } + + if (data.session) { + await supabase.auth.setSession(data.session); + } + + revalidatePath("/", "layout"); + redirect("/"); + }, + ); +} diff --git a/autogpt_platform/frontend/src/app/signup/page.tsx b/autogpt_platform/frontend/src/app/signup/page.tsx new file mode 100644 index 000000000000..1c2ad19e2e22 --- /dev/null +++ b/autogpt_platform/frontend/src/app/signup/page.tsx @@ -0,0 +1,225 @@ +"use client"; +import useUser from "@/hooks/useUser"; +import { signup } from "./actions"; +import { Button } from "@/components/ui/button"; +import { + Form, + FormControl, + FormDescription, + FormField, + FormItem, + FormLabel, + FormMessage, +} from "@/components/ui/form"; +import { useForm } from "react-hook-form"; +import { Input } from "@/components/ui/input"; +import { z } from "zod"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { PasswordInput } from "@/components/PasswordInput"; +import { FaGoogle, FaGithub, FaDiscord, FaSpinner } from "react-icons/fa"; +import { useState } from "react"; +import { useSupabase } from "@/components/SupabaseProvider"; +import { useRouter } from "next/navigation"; +import Link from "next/link"; +import { Checkbox } from "@/components/ui/checkbox"; + +const signupFormSchema = z.object({ + email: z.string().email().min(2).max(64), + password: z.string().min(6).max(64), + agreeToTerms: z.boolean().refine((value) => value === true, { + message: "You must agree to the Terms of Use and Privacy Policy", + }), +}); + +export default function LoginPage() { + const { supabase, isLoading: isSupabaseLoading } = useSupabase(); + const { user, isLoading: isUserLoading } = useUser(); + const [feedback, setFeedback] = useState(null); + const router = useRouter(); + const [isLoading, setIsLoading] = useState(false); + + const form = useForm>({ + resolver: zodResolver(signupFormSchema), + defaultValues: { + email: "", + password: "", + agreeToTerms: false, + }, + }); + + if (user) { + console.log("User exists, redirecting to home"); + router.push("/"); + } + + if (isUserLoading || isSupabaseLoading || user) { + return ( +
+ +
+ ); + } + + if (!supabase) { + return ( +
+ User accounts are disabled because the Supabase client is unavailable
+ ); + } + + async function handleSignInWithProvider( + provider: "google" | "github" | "discord", + ) { + const { data, error } = await supabase!.auth.signInWithOAuth({ + provider: provider, + options: { + redirectTo: + process.env.AUTH_CALLBACK_URL ?? + `http://localhost:3000/auth/callback`, + }, + }); + + if (!error) { + setFeedback(null); + return; + } + setFeedback(error.message); + } + + const onSignup = async (data: z.infer) => { + if (await form.trigger()) { + setIsLoading(true); + const error = await signup(data); + setIsLoading(false); + if (error) { + setFeedback(error); + return; + } + setFeedback(null); + } + }; + + return ( +
+
+

Create a New Account

+ {/*
+ + + +
*/} +
+ + ( + + Email + + + + + + )} + /> + ( + + Password + + + + + Password needs to be at least 6 characters long + + + + )} + /> + ( + + + + +
+ + I agree to the{" "} + + Terms of Use + {" "} + and{" "} + + Privacy Policy + + + +
+
+ )} + /> +
+ +
+
+ + Already a member? Log In here + +
+ +

{feedback}

+ +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/Flow.tsx b/autogpt_platform/frontend/src/components/Flow.tsx index 7d7af8c2de80..8fb1505788d7 100644 --- a/autogpt_platform/frontend/src/components/Flow.tsx +++ b/autogpt_platform/frontend/src/components/Flow.tsx @@ -26,8 +26,12 @@ import { import "@xyflow/react/dist/style.css"; import { CustomNode } from "./CustomNode"; import "./flow.css"; -import { Link } from "@/lib/autogpt-server-api"; -import { getTypeColor, filterBlocksByType } from "@/lib/utils"; +import { BlockUIType, Link } from "@/lib/autogpt-server-api"; +import { + getTypeColor, + filterBlocksByType, + findNewlyAddedBlockCoordinates, +} from "@/lib/utils"; import { history } from "./history"; import { CustomEdge } from "./CustomEdge"; import ConnectionLine from "./ConnectionLine"; @@ -45,6 +49,7 @@ import RunnerUIWrapper, { import PrimaryActionBar from "@/components/PrimaryActionButton"; import { useToast } from "@/components/ui/use-toast"; import { forceLoad } from "@sentry/nextjs"; +import { useCopyPaste } from "../hooks/useCopyPaste"; // This is for the history, this is the minimum distance a block must move before it is logged // It helps to prevent spamming the history with small movements especially when pressing on a input in a block @@ -56,6 +61,15 @@ type FlowContextType = { getNextNodeId: () => string; }; +export type NodeDimension = { + [nodeId: string]: { + x: number; + y: number; + width: number; + height: number; + }; +}; + export const FlowContext = createContext(null); const FlowEditor: React.FC<{ @@ -63,8 +77,14 @@ const FlowEditor: React.FC<{ template?: boolean; className?: string; }> = ({ flowID, template, className }) => { - const { addNodes, addEdges, getNode, deleteElements, updateNode } = - useReactFlow(); + const { + addNodes, + addEdges, + getNode, + deleteElements, + updateNode, + setViewport, + } = useReactFlow(); const [nodeId, setNodeId] = useState(1); const [copiedNodes, setCopiedNodes] = useState([]); const [copiedEdges, setCopiedEdges] = useState([]); @@ -109,6 +129,9 @@ const FlowEditor: React.FC<{ const TUTORIAL_STORAGE_KEY = "shepherd-tour"; + // It stores the dimension of all nodes with position as well + const [nodeDimensions, setNodeDimensions] = useState({}); + useEffect(() => { if (params.get("resetTutorial") === "true") { localStorage.removeItem(TUTORIAL_STORAGE_KEY); @@ -401,16 +424,36 @@ const FlowEditor: React.FC<{ return; } - // Calculate the center of the viewport considering zoom - const viewportCenter = { - x: (window.innerWidth / 2 - x) / zoom, - y: (window.innerHeight / 2 - y) / zoom, - }; + /* + Calculate a position to the right of the newly added block, allowing for some margin. + If adding to the right side causes the new block to collide with an existing block, attempt to place it at the bottom or left. + Why not the top? Because the height of the new block is unknown. + If it still collides, run a loop to find the best position where it does not collide. + Then, adjust the canvas to center on the newly added block. + Note: The width is known, e.g., w = 300px for a note and w = 500px for others, but the height is dynamic. + */ + + // Alternative: We could also use D3 force, Intersection for this (React flow Pro examples) + + const viewportCoordinates = + nodeDimensions && Object.keys(nodeDimensions).length > 0 + ? // we will get all the dimension of nodes, then store + findNewlyAddedBlockCoordinates( + nodeDimensions, + (nodeSchema.uiType == BlockUIType.NOTE ? 
300 : 500) / zoom, + 60 / zoom, + zoom, + ) + : // we will get all the dimension of nodes, then store + { + x: (window.innerWidth / 2 - x) / zoom, + y: (window.innerHeight / 2 - y) / zoom, + }; const newNode: CustomNode = { id: nodeId.toString(), type: "custom", - position: viewportCenter, // Set the position to the calculated viewport center + position: viewportCoordinates, // Set the position to the calculated viewport center data: { blockType: nodeType, blockCosts: nodeSchema.costs, @@ -432,6 +475,15 @@ const FlowEditor: React.FC<{ setNodeId((prevId) => prevId + 1); clearNodesStatusAndOutput(); // Clear status and output when a new node is added + setViewport( + { + x: -viewportCoordinates.x * zoom + window.innerWidth / 2, + y: -viewportCoordinates.y * zoom + window.innerHeight / 2 - 100, + zoom: 0.8, + }, + { duration: 500 }, + ); + history.push({ type: "ADD_NODE", payload: { node: { ...newNode, ...newNode.data } }, @@ -441,8 +493,10 @@ const FlowEditor: React.FC<{ }, [ nodeId, + setViewport, availableNodes, addNodes, + nodeDimensions, deleteElements, clearNodesStatusAndOutput, x, @@ -451,6 +505,38 @@ const FlowEditor: React.FC<{ ], ); + const findNodeDimensions = useCallback(() => { + const newNodeDimensions: NodeDimension = nodes.reduce((acc, node) => { + const nodeElement = document.querySelector( + `[data-id="custom-node-${node.id}"]`, + ); + if (nodeElement) { + const rect = nodeElement.getBoundingClientRect(); + const { left, top, width, height } = rect; + + // Convert screen coordinates to flow coordinates + const flowX = (left - x) / zoom; + const flowY = (top - y) / zoom; + const flowWidth = width / zoom; + const flowHeight = height / zoom; + + acc[node.id] = { + x: flowX, + y: flowY, + width: flowWidth, + height: flowHeight, + }; + } + return acc; + }, {} as NodeDimension); + + setNodeDimensions(newNodeDimensions); + }, [nodes, x, y, zoom]); + + useEffect(() => { + findNodeDimensions(); + }, [nodes, findNodeDimensions]); + const handleUndo = () => { history.undo(); }; @@ -459,6 +545,8 @@ const FlowEditor: React.FC<{ history.redo(); }; + const handleCopyPaste = useCopyPaste(getNextNodeId); + const handleKeyDown = useCallback( (event: KeyboardEvent) => { // Prevent copy/paste if any modal is open or if the focus is on an input element @@ -470,68 +558,9 @@ const FlowEditor: React.FC<{ if (isAnyModalOpen || isInputField) return; - if (event.ctrlKey || event.metaKey) { - if (event.key === "c" || event.key === "C") { - // Copy selected nodes - const selectedNodes = nodes.filter((node) => node.selected); - const selectedEdges = edges.filter((edge) => edge.selected); - setCopiedNodes(selectedNodes); - setCopiedEdges(selectedEdges); - } - if (event.key === "v" || event.key === "V") { - // Paste copied nodes - if (copiedNodes.length > 0) { - const oldToNewNodeIDMap: Record = {}; - const pastedNodes = copiedNodes.map((node, index) => { - const newNodeId = (nodeId + index).toString(); - oldToNewNodeIDMap[node.id] = newNodeId; - return { - ...node, - id: newNodeId, - position: { - x: node.position.x + 20, // Offset pasted nodes - y: node.position.y + 20, - }, - data: { - ...node.data, - status: undefined, // Reset status - executionResults: undefined, // Clear output data - }, - }; - }); - setNodes((existingNodes) => - // Deselect copied nodes - existingNodes.map((node) => ({ ...node, selected: false })), - ); - addNodes(pastedNodes); - setNodeId((prevId) => prevId + copiedNodes.length); - - const pastedEdges = copiedEdges.map((edge) => { - const newSourceId = 
oldToNewNodeIDMap[edge.source] ?? edge.source; - const newTargetId = oldToNewNodeIDMap[edge.target] ?? edge.target; - return { - ...edge, - id: `${newSourceId}_${edge.sourceHandle}_${newTargetId}_${edge.targetHandle}_${Date.now()}`, - source: newSourceId, - target: newTargetId, - }; - }); - addEdges(pastedEdges); - } - } - } + handleCopyPaste(event); }, - [ - isAnyModalOpen, - nodes, - edges, - copiedNodes, - setNodes, - addNodes, - copiedEdges, - addEdges, - nodeId, - ], + [isAnyModalOpen, handleCopyPaste], ); useEffect(() => { diff --git a/autogpt_platform/frontend/src/components/NodeOutputs.tsx b/autogpt_platform/frontend/src/components/NodeOutputs.tsx index c7b7dc57100a..fcedf1f408ce 100644 --- a/autogpt_platform/frontend/src/components/NodeOutputs.tsx +++ b/autogpt_platform/frontend/src/components/NodeOutputs.tsx @@ -23,7 +23,6 @@ export default function NodeOutputs({ Pin: {beautifyString(pin)}
-
Data:
@@ -37,6 +36,7 @@ export default function NodeOutputs({ ))}
+
))} diff --git a/autogpt_platform/frontend/src/components/edit/control/SaveControl.tsx b/autogpt_platform/frontend/src/components/edit/control/SaveControl.tsx index 030d83c165b4..61423c352f65 100644 --- a/autogpt_platform/frontend/src/components/edit/control/SaveControl.tsx +++ b/autogpt_platform/frontend/src/components/edit/control/SaveControl.tsx @@ -1,4 +1,4 @@ -import React from "react"; +import React, { useCallback, useEffect } from "react"; import { Popover, PopoverContent, @@ -15,6 +15,7 @@ import { TooltipContent, TooltipTrigger, } from "@/components/ui/tooltip"; +import { useToast } from "@/components/ui/use-toast"; interface SaveControlProps { agentMeta: GraphMeta | null; @@ -52,14 +53,35 @@ export const SaveControl = ({ // Determines if we're saving a template or an agent let isTemplate = agentMeta?.is_template ? true : undefined; - const handleSave = () => { + const handleSave = useCallback(() => { onSave(isTemplate); - }; + }, [onSave, isTemplate]); const getType = () => { return agentMeta?.is_template ? "template" : "agent"; }; + const { toast } = useToast(); + + useEffect(() => { + const handleKeyDown = (event: KeyboardEvent) => { + if ((event.ctrlKey || event.metaKey) && event.key === "s") { + event.preventDefault(); // Stop the browser default action + handleSave(); // Call your save function + toast({ + duration: 2000, + title: "All changes saved successfully!", + }); + } + }; + + window.addEventListener("keydown", handleKeyDown); + + return () => { + window.removeEventListener("keydown", handleKeyDown); + }; + }, [handleSave]); + return ( diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx index 0189086b40d0..252eb6fd704f 100644 --- a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx +++ b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx @@ -41,7 +41,8 @@ export const providerIcons: Record> = { github: FaGithub, google: FaGoogle, notion: NotionLogoIcon, - jina: FaKey, // Use a generic icon for now as there is no specific icon for Jina AI + jina: FaKey, + pinecone: FaKey, }; // --8<-- [end:ProviderIconsEmbed] diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx index c2a0ee28934a..40ad438b929b 100644 --- a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx +++ b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx @@ -17,6 +17,7 @@ const CREDENTIALS_PROVIDER_NAMES = [ "google", "notion", "jina", + "pinecone", ] as const; export type CredentialsProviderName = @@ -26,7 +27,8 @@ const providerDisplayNames: Record = { github: "GitHub", google: "Google", notion: "Notion", - jina: "Jina AI", + jina: "Jina", + pinecone: "Pinecone", }; // --8<-- [end:CredentialsProviderNames] diff --git a/autogpt_platform/frontend/src/hooks/useCopyPaste.ts b/autogpt_platform/frontend/src/hooks/useCopyPaste.ts new file mode 100644 index 000000000000..c5c6400fa865 --- /dev/null +++ b/autogpt_platform/frontend/src/hooks/useCopyPaste.ts @@ -0,0 +1,122 @@ +import { useCallback } from "react"; +import { Node, Edge, useReactFlow, useViewport } from "@xyflow/react"; + +export function useCopyPaste(getNextNodeId: () => string) { + const { setNodes, addEdges, getNodes, getEdges } = useReactFlow(); + const { x, y, zoom } = useViewport(); + + const handleCopyPaste = 
useCallback( + (event: KeyboardEvent) => { + if (event.ctrlKey || event.metaKey) { + if (event.key === "c" || event.key === "C") { + const selectedNodes = getNodes().filter((node) => node.selected); + const selectedEdges = getEdges().filter((edge) => edge.selected); + + const copiedData = { + nodes: selectedNodes.map((node) => ({ + ...node, + data: { + ...node.data, + connections: [], + }, + })), + edges: selectedEdges, + }; + + localStorage.setItem("copiedFlowData", JSON.stringify(copiedData)); + } + if (event.key === "v" || event.key === "V") { + const copiedDataString = localStorage.getItem("copiedFlowData"); + if (copiedDataString) { + const copiedData = JSON.parse(copiedDataString); + const oldToNewIdMap: Record = {}; + + const viewportCenter = { + x: (window.innerWidth / 2 - x) / zoom, + y: (window.innerHeight / 2 - y) / zoom, + }; + + let minX = Infinity, + minY = Infinity, + maxX = -Infinity, + maxY = -Infinity; + copiedData.nodes.forEach((node: Node) => { + minX = Math.min(minX, node.position.x); + minY = Math.min(minY, node.position.y); + maxX = Math.max(maxX, node.position.x); + maxY = Math.max(maxY, node.position.y); + }); + + const offsetX = viewportCenter.x - (minX + maxX) / 2; + const offsetY = viewportCenter.y - (minY + maxY) / 2; + + const pastedNodes = copiedData.nodes.map((node: Node) => { + const newNodeId = getNextNodeId(); + oldToNewIdMap[node.id] = newNodeId; + return { + ...node, + id: newNodeId, + position: { + x: node.position.x + offsetX, + y: node.position.y + offsetY, + }, + data: { + ...node.data, + status: undefined, + executionResults: undefined, + }, + }; + }); + + const pastedEdges = copiedData.edges.map((edge: Edge) => { + const newSourceId = oldToNewIdMap[edge.source] ?? edge.source; + const newTargetId = oldToNewIdMap[edge.target] ?? 
edge.target; + return { + ...edge, + id: `${newSourceId}_${edge.sourceHandle}_${newTargetId}_${edge.targetHandle}_${Date.now()}`, + source: newSourceId, + target: newTargetId, + }; + }); + + setNodes((existingNodes) => [ + ...existingNodes.map((node) => ({ ...node, selected: false })), + ...pastedNodes, + ]); + addEdges(pastedEdges); + + setNodes((nodes) => { + return nodes.map((node) => { + if (oldToNewIdMap[node.id]) { + const nodeConnections = pastedEdges + .filter( + (edge) => + edge.source === node.id || edge.target === node.id, + ) + .map((edge) => ({ + edge_id: edge.id, + source: edge.source, + target: edge.target, + sourceHandle: edge.sourceHandle, + targetHandle: edge.targetHandle, + })); + return { + ...node, + data: { + ...node.data, + connections: nodeConnections, + }, + }; + } + return node; + }); + }); + } + } + } + }, + [setNodes, addEdges, getNodes, getEdges, getNextNodeId, x, y, zoom], + ); + + return handleCopyPaste; +} diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/baseClient.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/baseClient.ts index dc80bb0401d4..60f2a33369ab 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/baseClient.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/baseClient.ts @@ -273,10 +273,10 @@ export default class BaseAutoGPTServerAPI { if ( response.status === 403 && - response.statusText === "Not authenticated" && typeof window !== "undefined" // Check if in browser environment ) { window.location.href = "/login"; + return null; } let errorDetail; diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index 535fd8ffec45..d7ae2a0a8331 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -96,7 +96,7 @@ export type CredentialsType = "api_key" | "oauth2"; // --8<-- [start:BlockIOCredentialsSubSchema] export type BlockIOCredentialsSubSchema = BlockIOSubSchemaMeta & { - credentials_provider: "github" | "google" | "notion" | "jina"; + credentials_provider: "github" | "google" | "notion" | "jina" | "pinecone"; credentials_scopes?: string[]; credentials_types: Array; }; diff --git a/autogpt_platform/frontend/src/lib/utils.ts b/autogpt_platform/frontend/src/lib/utils.ts index 0c7d2023f90b..76236ced6af1 100644 --- a/autogpt_platform/frontend/src/lib/utils.ts +++ b/autogpt_platform/frontend/src/lib/utils.ts @@ -1,6 +1,7 @@ import { type ClassValue, clsx } from "clsx"; import { twMerge } from "tailwind-merge"; import { Category } from "./autogpt-server-api/types"; +import { NodeDimension } from "@/components/Flow"; export function cn(...inputs: ClassValue[]) { return twMerge(clsx(inputs)); @@ -213,3 +214,78 @@ export function getBehaveAs(): BehaveAs { ? BehaveAs.CLOUD : BehaveAs.LOCAL; } + +function rectanglesOverlap( + rect1: { x: number; y: number; width: number; height?: number }, + rect2: { x: number; y: number; width: number; height?: number }, +): boolean { + const x1 = rect1.x, + y1 = rect1.y, + w1 = rect1.width, + h1 = rect1.height ?? 100; + const x2 = rect2.x, + y2 = rect2.y, + w2 = rect2.width, + h2 = rect2.height ?? 
100; + + // Check if the rectangles do not overlap + return !(x1 + w1 <= x2 || x1 >= x2 + w2 || y1 + h1 <= y2 || y1 >= y2 + h2); +} + +export function findNewlyAddedBlockCoordinates( + nodeDimensions: NodeDimension, + newWidth: number, + margin: number, + zoom: number, +) { + const nodeDimensionArray = Object.values(nodeDimensions); + + for (let i = nodeDimensionArray.length - 1; i >= 0; i--) { + const lastNode = nodeDimensionArray[i]; + const lastNodeHeight = lastNode.height ?? 100; + + // Right of the last node + let newX = lastNode.x + lastNode.width + margin; + let newY = lastNode.y; + let newRect = { x: newX, y: newY, width: newWidth, height: 100 / zoom }; + + const collisionRight = nodeDimensionArray.some((node) => + rectanglesOverlap(newRect, node), + ); + + if (!collisionRight) { + return { x: newX, y: newY }; + } + + // Left of the last node + newX = lastNode.x - newWidth - margin; + newRect = { x: newX, y: newY, width: newWidth, height: 100 / zoom }; + + const collisionLeft = nodeDimensionArray.some((node) => + rectanglesOverlap(newRect, node), + ); + + if (!collisionLeft) { + return { x: newX, y: newY }; + } + + // Below the last node + newX = lastNode.x; + newY = lastNode.y + lastNodeHeight + margin; + newRect = { x: newX, y: newY, width: newWidth, height: 100 / zoom }; + + const collisionBelow = nodeDimensionArray.some((node) => + rectanglesOverlap(newRect, node), + ); + + if (!collisionBelow) { + return { x: newX, y: newY }; + } + } + + // Default position if no space is found + return { + x: 0, + y: 0, + }; +} diff --git a/autogpt_platform/infra/helm/autogpt-builder/values.dev.yaml b/autogpt_platform/infra/helm/autogpt-builder/values.dev.yaml index 793ef400ec12..128ea3ee44a5 100644 --- a/autogpt_platform/infra/helm/autogpt-builder/values.dev.yaml +++ b/autogpt_platform/infra/helm/autogpt-builder/values.dev.yaml @@ -1,9 +1,9 @@ # dev values, overwrite base values as needed. 
image: - repository: us-east1-docker.pkg.dev/agpt-dev/agpt-builder-dev/agpt-builder-dev + repository: us-east1-docker.pkg.dev/agpt-dev/agpt-frontend-dev/agpt-frontend-dev pullPolicy: Always - tag: "fe3d2a9" + tag: "latest" serviceAccount: annotations: @@ -66,4 +66,5 @@ env: SENTRY_AUTH_TOKEN: "" NEXT_PUBLIC_AUTH_CALLBACK_URL: "https://dev-server.agpt.co/auth/callback" NEXT_PUBLIC_AGPT_WS_SERVER_URL: "wss://dev-ws-server.agpt.co/ws" - NEXT_PUBLIC_AGPT_MARKETPLACE_URL: "https://dev-market.agpt.co/api/v1/market" \ No newline at end of file + NEXT_PUBLIC_AGPT_MARKETPLACE_URL: "https://dev-market.agpt.co/api/v1/market" + NEXT_PUBLIC_BEHAVE_AS: "CLOUD" \ No newline at end of file diff --git a/autogpt_platform/infra/helm/autogpt-builder/values.prod.yaml b/autogpt_platform/infra/helm/autogpt-builder/values.prod.yaml index f1f3a90efca6..4c65a1b17c53 100644 --- a/autogpt_platform/infra/helm/autogpt-builder/values.prod.yaml +++ b/autogpt_platform/infra/helm/autogpt-builder/values.prod.yaml @@ -85,4 +85,5 @@ env: NEXT_PUBLIC_SUPABASE_URL: "https://bgwpwdsxblryihinutbx.supabase.co" GOOGLE_CLIENT_ID: "" GOOGLE_CLIENT_SECRET: "" - SENTRY_AUTH_TOKEN: "" \ No newline at end of file + SENTRY_AUTH_TOKEN: "" + NEXT_PUBLIC_BEHAVE_AS: "CLOUD" \ No newline at end of file diff --git a/autogpt_platform/infra/helm/autogpt-market/templates/sealed-secrets.yaml b/autogpt_platform/infra/helm/autogpt-market/templates/sealed-secrets.yaml index 86258929e81b..41ab87d94396 100644 --- a/autogpt_platform/infra/helm/autogpt-market/templates/sealed-secrets.yaml +++ b/autogpt_platform/infra/helm/autogpt-market/templates/sealed-secrets.yaml @@ -5,6 +5,7 @@ metadata: namespace: {{ .Release.Namespace }} annotations: sealedsecrets.bitnami.com/cluster-wide: "true" + force-update: "true" spec: encryptedData: {{- range $key, $value := .Values.secrets }} diff --git a/autogpt_platform/infra/helm/autogpt-market/values.dev.yaml b/autogpt_platform/infra/helm/autogpt-market/values.dev.yaml index 96d63b40db1a..8b30d3beac0b 100644 --- a/autogpt_platform/infra/helm/autogpt-market/values.dev.yaml +++ b/autogpt_platform/infra/helm/autogpt-market/values.dev.yaml @@ -95,6 +95,6 @@ env: secrets: SUPABASE_JWT_SECRET: "AgBL6H6byPKXVN0nYWgqyoZBHJJJqk8S3rNYSu3JvwOsNn3Sw94SrJcXuE48fxcojcbKvYcDshetFhrSNEsfUwvbSFoaIj+MItPIZ8FGHyfXnmeZJ5j/sdvcjaMmWS7CiL2jhzI+nc8rL2ATV2TgA7E6FA9MvhWqkAyZu04pCd1c9DsAlpcfQ3pxywXjOV+BU0+1+++Z+fTnaugt+hRbhHrKTddaPhi72KrIyJPOUlqfht0JcgflT1f5frvmNnDwkiP862knhJsqg7XyrSy4msCN7eH4BvV01pO3KhAEnMG4Lk/5FeE/2t05+HqgB4mvPbkqfY6g8Lvf6qmd5g+stB10wRmr+TxzokOom36r7sYd3tyXZMgemJOi3BZjYk7774zf/TI081pJx1FPvM6dDPQdrgnU1nhshq/gLyKB5tTTCkPQHW+YhUtApWgrC5mq8ezMqfuwrUuR+NvyO59K56ERJJFAw1fDKHIVl4TYmftet42lkVOchml30cje9gjBtpOrTkFf8lMv5DLQ/ygdwsAYmVrpYtXbT+GAaRI9QIv/rZVuckfz4uIhTp7IjAtRXJreMH2V8GZ22h6Of/BQG+XA42lMA2huIyLwsqmtL0qxNmIu5EGjcXBvlOt5eNquzts68jgNqEfE6fEuqS9lDepvtTSxGp7wmcmWwOBbIgbCnxl8t4IW/e1qvOPuvxMldH+YtVIvfHMf3DnvbehhCIYuLmRM+E6139/qdGKreRyCi4PA4PKTMEG962rSu/7z0X5wu+UYY+kESrs7HhksL3PhB3dpbZ0HF+b6lchziTI0atG+UNX05ysL" - DATABASE_URL: 
"AgACJkybaNon1X7ZkvtyM0mJ3Gfbjh1LWSJdWGC3ny6QU/8yERomTjvDylHuFoaoGeEs5ewYtgH87G/t1q3LF35cjmipGTSJbxFKfyoRcGBZSajen2Ijs+satVD7T6bmCNsurEsUD16QDLoV5lx+THXdEjv4VrBtFwY0HO2BIP15X/vMie9Mk91Uk1eze5dj8WoQ5OywH8O8Ugh7/iOleEyiPaMzxdRAfwvYHgX5QYqDno0ktZFyKnDOpCnegIwcUei4tt+EiTAG1SxESk48DQQokNrZc+lqlPwozX6FOYkgcQiZZqIN+9qOy5hB+1wKggS8zcd7/YAlSpSd8LMeAokflxOabN5Ctyvx3k5tGgstKQabW1TErwL8RjB3WYClLvJl8bfc4qILr4jpfA4all59f8oinATiyeqJ3Hx267FdHH2aywnXnNdEmycGhKkuH0vAa74oYsUiD9BrjkxmMdxCwmEjl47ob5fwcraKOc7hNF+hPi2C61/z1T6yoIeu4ediMTk0m1ZPIOotlbj/nKkmrKDfD+WMia/YvBpUtlM/SaEyX66gmO03kxW92cmXVCjK65oGh0ueCyK84J9e/2y+qO6yFXh7991+wTQyAedBBoc2myNcebKzuAmFNwanCs+FMOktPZsMMzfFy3oHJQxgFkPEp+jKennODqTe0A3LBhJC3ddwCYgd1TAABC2+DqFGaCRiyaSZ4BZIitEPFzpJwITIFZoRyxrCibrGKKnILVjGNaiV/KdIzfj70AAdzG/7GFdA9SKzRQimnVw99YTjNouCYzt7iLBV/8KrcMvyyeHZle1A6zg0gjjj4Yp6p0ssIOvhuDLjec4NMi/E5EbgYzKQFr7jN2u9%" + DATABASE_URL: "AgACJkybaNon1X7ZkvtyM0mJ3Gfbjh1LWSJdWGC3ny6QU/8yERomTjvDylHuFoaoGeEs5ewYtgH87G/t1q3LF35cjmipGTSJbxFKfyoRcGBZSajen2Ijs+satVD7T6bmCNsurEsUD16QDLoV5lx+THXdEjv4VrBtFwY0HO2BIP15X/vMie9Mk91Uk1eze5dj8WoQ5OywH8O8Ugh7/iOleEyiPaMzxdRAfwvYHgX5QYqDno0ktZFyKnDOpCnegIwcUei4tt+EiTAG1SxESk48DQQokNrZc+lqlPwozX6FOYkgcQiZZqIN+9qOy5hB+1wKggS8zcd7/YAlSpSd8LMeAokflxOabN5Ctyvx3k5tGgstKQabW1TErwL8RjB3WYClLvJl8bfc4qILr4jpfA4all59f8oinATiyeqJ3Hx267FdHH2aywnXnNdEmycGhKkuH0vAa74oYsUiD9BrjkxmMdxCwmEjl47ob5fwcraKOc7hNF+hPi2C61/z1T6yoIeu4ediMTk0m1ZPIOotlbj/nKkmrKDfD+WMia/YvBpUtlM/SaEyX66gmO03kxW92cmXVCjK65oGh0ueCyK84J9e/2y+qO6yFXh7991+wTQyAedBBoc2myNcebKzuAmFNwanCs+FMOktPZsMMzfFy3oHJQxgFkPEp+jKennODqTe0A3LBhJC3ddwCYgd1TAABC2+DqFGaCRiyaSZ4BZIitEPFzpJwITIFZoRyxrCibrGKKnILVjGNaiV/KdIzfj70AAdzG/7GFdA9SKzRQimnVw99YTjNouCYzt7iLBV/8KrcMvyyeHZle1A6zg0gjjj4Yp6p0ssIOvhuDLjec4NMi/E5EbgYzKQFr7jN2u9" SENTRY_DSN: "AgB9i02k9BgaIXF0p9Qyyeo0PRa9bd3UiPBWQ3V4Jn19Vy5XAzKfYvqP8t+vafN2ffY+wCk1FlhYzdIuFjh3oRvdKvtwGEBZk6nLFiUrw/GSum0ueR2OzEy+AwGFXA9FstD0KCMJvyehSv9xRm9kqLOC4Xb/5lOWwTNF3AKqkEMEeKrOWx4OLXG6MLdR7OicY45BCE5WvcV2PizDaN5w3J72eUxFP0HjXit/aW/gK32IJME0RxeuQZ5TnPKTNrooYPR0eWXd2PgYshFjQ2ARy/OsvOrD10y8tQ3M5qx/HNWLC/r0lEu2np+9iUIAE1ufSwjmNSyi4V8usdZWq7xnf3vuKlSgmveqKkLbwQUWj1BpLNIjUvyY+1Rk1rxup/WCgaw+xOZd6sR/qTIjILv5GuzpU0AiwEm7sgl2pmpFXq6n6QjNOfZoPBTL73f4bpXNJ3EyMYDbPxOtGDz91B+bDtOsMr1DNWQslKkk3EIilm/l0+NuLKxf/e2HwM3sB15mkQqVZBdbiVOr7B27cR9xAnr296KE/BU6E9dp/fl+IgcaonMpTsE61pCLHWxQXNBO5X078/zhmaXBQyEBNQ5SPDr9u3pHWrrLkBtXwldZvgmLMMVFMAzrVVkJB4lC9sZj0pXPhda0/BsA4xcGRELj/PizwSr+kb3lDumNMqzEap5ZjEGCBpeeIVSo19v+RoEDw0AFmyxfYx2+91HsgiEqjEUg+J6yDmjAoRpOD1wRZOnnpR8ufMiqdBteCG8B5SXkhgto1WtDyOMVlX2wbmBFVetv2nAbMIA/l4E/Yv8HXiJsTqAkeYc5Qak6/SMGnZTw7Q==" SUPABASE_SERVICE_ROLE_KEY: 
"AgCrHCd2AdIv++sX7AAf0YoV+qDFhPuErd/Q9Jgj8/1wDJklqv0giI/M7TRUV6j2Gqa/yLP90Hoiy2BboE0V3FrTtHzasxtSK0hd93+bxvZ34FEfKQyAiddBR8OxzlPwaplzaJ+/Tu+yHf1EesgXrUdydk38D4AQqkrC30FRKJcCxJNTHHzsZiHGQLZNP2l0cEsmqtMXMk8TqbcqHvJZRpr8jP1dSJ7bxEdU9mH/zB4HV+EsPLDFWFAnFjbEQwv8FEGgqpy81ifch4Hz7S0wjwk0x/QsagKavBTvI579K6Sx7uJyMyilpzm5Ct8kDXTEGUWv7pFINXM5cAbcBNzuvwvtXmshMwRsl9e/5Y2/T2VgS7/wPTJA4AmyyrSK976SOjo7imb4XfMwc6Cc/2GE0BRW9jiKvzjQ1TC2ovQpNujTYYgPzIq8sFXEVss31DIcfwbRAzgKTTQZKl+H+i9AS0q6iYHtKORwTQ2bv2XwQwxogXMHTUq1oC3MkzjKfV9DcoTHU2o/+gTyOBW5i3BRatuublA1x0EwDoEVmWA1+i1h2bpkl4QYuyeNlhJnRHzuQU3RdFLWn3MkDM8Q4Y9n0/XXwwTCgqtdAExqNh3YJYumWiGiWfdBpEUqlUtOUurNMXy6rHH4odnNKeLQfMOa9406x5H5xiwNkl3mzGjNiPDMS7JMTptlsoL8DshE2TM0PqZVrQy81OsGNpdiU8MVeUdHO6/bDBe7j9v0FipqpeehX1AZEYb/4CWosTJACWpaTnLYRh+w12bk3x6Sj0kriDKMOuJLBRh1fveZXUC9C0FsEPhq2rBLQDVh78DkBIeKVGUzuxDP/6mT3OSBPe4aCye0vTmwtEOEvB+A7rcMkOl+j90bKAveE9H+f7UVU6Og40Nc3sSuMolKHbQyB9TNd4+jOfmySSN675riL6BpFFCSuWqjrqWFr0yI1h/xAg+YMg8WzWarwSeWr3ykXrbhQvu7Oj27ffLXEIvS0gU=" \ No newline at end of file diff --git a/autogpt_platform/infra/helm/autogpt-server/templates/deployment-executor.yaml b/autogpt_platform/infra/helm/autogpt-server/templates/deployment-executor.yaml index ad9e1962f18f..c71a1a13fb28 100644 --- a/autogpt_platform/infra/helm/autogpt-server/templates/deployment-executor.yaml +++ b/autogpt_platform/infra/helm/autogpt-server/templates/deployment-executor.yaml @@ -46,6 +46,9 @@ spec: - name: http containerPort: {{ .Values.serviceExecutor.port }} protocol: TCP + - name: db-http + containerPort: {{ .Values.serviceDBManager.port }} + protocol: TCP resources: {{- toYaml .Values.resources | nindent 12 }} {{- with .Values.volumeMounts }} diff --git a/autogpt_platform/infra/helm/autogpt-server/templates/service-executor.yaml b/autogpt_platform/infra/helm/autogpt-server/templates/service-executor.yaml index 36ef560b2220..6d8d13ec6066 100644 --- a/autogpt_platform/infra/helm/autogpt-server/templates/service-executor.yaml +++ b/autogpt_platform/infra/helm/autogpt-server/templates/service-executor.yaml @@ -15,5 +15,9 @@ spec: targetPort: http protocol: TCP name: http + - port: {{ .Values.serviceDBManager.port }} + targetPort: db-http + protocol: TCP + name: db-http selector: - app.kubernetes.io/component: executor + app.kubernetes.io/component: executor \ No newline at end of file diff --git a/autogpt_platform/infra/helm/autogpt-server/values.dev.yaml b/autogpt_platform/infra/helm/autogpt-server/values.dev.yaml index 19349017aef4..9150de2dfe23 100644 --- a/autogpt_platform/infra/helm/autogpt-server/values.dev.yaml +++ b/autogpt_platform/infra/helm/autogpt-server/values.dev.yaml @@ -1,7 +1,7 @@ # dev values, overwrite base values as needed. 
image: - repository: us-east1-docker.pkg.dev/agpt-dev/agpt-server-dev/agpt-server-dev + repository: us-east1-docker.pkg.dev/agpt-dev/agpt-backend-dev/agpt-backend-dev pullPolicy: Always tag: "latest" @@ -25,6 +25,13 @@ serviceExecutor: annotations: beta.cloud.google.com/backend-config: '{"default": "autogpt-server-backend-config"}' +serviceDBManager: + type: ClusterIP + port: 8005 + targetPort: 8005 + annotations: + beta.cloud.google.com/backend-config: '{"default": "autogpt-server-backend-config"}' + ingress: enabled: true className: "gce" @@ -58,7 +65,7 @@ resources: livenessProbe: httpGet: - path: /heath + path: /health port: 8006 initialDelaySeconds: 30 periodSeconds: 10 @@ -106,6 +113,7 @@ env: SUPABASE_URL: "https://adfjtextkuilwuhzdjpf.supabase.co" AGENTSERVER_HOST: "autogpt-server.dev-agpt.svc.cluster.local" EXECUTIONMANAGER_HOST: "autogpt-server-executor.dev-agpt.svc.cluster.local" + DATABASEMANAGER_HOST: "autogpt-server-executor.dev-agpt.svc.cluster.local" secrets: ANTHROPIC_API_KEY: "AgBllA6KzTdyLs6Tc+HrwIeSjdsPQxdU/4qpqT64H4K3nTehS6kpCW1qtH6eBChs1v+m857sUgsrB9u8+P0aAa3DcgZ/gNg+G1GX6vAY2NJvP/2Q+Hiwi1cAn+R3ChHejG9P2C33hTa6+V9cpUI9xUWOwWLOIQZpLvAc7ltsi0ZJ06qFO0Zhj+H9K768h7U3XaivwywX7PT7BnUTiT6AQkAwD2misBkeSQZdsllOD0th3b2245yieqal9osZHlSlslI9c6EMpH0n+szSND7goyjgsik0Tb0xJU6kGggdcw9hl4x91rYDYNPs0hFES9HUxzfiAid6Y2rDUVBXoNg7K7pMR6/foIkl+gCg/1lqOS0FRlUVyAQGJEx6XphyX/SftgLaI7obaVnzjErrpLWY1ZRiD8VVZD40exf8FddGOXwPvxYHrrrPotlTDLONZMn4Fl46tJCTsoQfHCjco+sz7/nLMMnHx+l1D0eKBuGPVsKTtbWozhLCNuWEgcWb4kxJK5sd1g/GylD43g8hFW531Vbpk1J1rpf7Hurd/aTUjwSXmdxB2qXTT4HRG+Us6PnhMIuf/yxilTs4WNShY0zHhYgnQFSM3oCTL6XXG1dqdOwY2k6+k2wCQtpK45boVN5PpBrQuDuFdWb/jM5jH6L8ns0dMMlY3lHM459u7FEn8rum/xXdP/JvpFb+yct3Rgc54SOT5HuVUNAHzzmbWhY4RG4b3i21L2SlsVUwjKvu+PlN4MN5KPilvHe3yODXZu0Gp0ClzDNZQiKQU67H0uYr6eRccMDsHtMlPELqnjyQZ+OriydzB3qXidAkguKNmzPypz0LyTMnry7YpNRGyUw=" diff --git a/autogpt_platform/infra/helm/autogpt-server/values.prod.yaml b/autogpt_platform/infra/helm/autogpt-server/values.prod.yaml index 0e73afa33175..c6e96862cc6f 100644 --- a/autogpt_platform/infra/helm/autogpt-server/values.prod.yaml +++ b/autogpt_platform/infra/helm/autogpt-server/values.prod.yaml @@ -25,6 +25,13 @@ serviceExecutor: annotations: beta.cloud.google.com/backend-config: '{"default": "autogpt-server-backend-config"}' +serviceDBManager: + type: ClusterIP + port: 8005 + targetPort: 8005 + annotations: + beta.cloud.google.com/backend-config: '{"default": "autogpt-server-backend-config"}' + ingress: enabled: true className: "gce" @@ -72,7 +79,7 @@ cors: livenessProbe: httpGet: - path: /heath + path: /health port: 8006 initialDelaySeconds: 30 periodSeconds: 10 @@ -80,7 +87,7 @@ livenessProbe: failureThreshold: 6 readinessProbe: httpGet: - path: /heath + path: /health port: 8006 initialDelaySeconds: 30 periodSeconds: 10 @@ -103,6 +110,7 @@ env: SUPABASE_URL: "https://bgwpwdsxblryihinutbx.supabase.co" AGENTSERVER_HOST: "autogpt-server.prod-agpt.svc.cluster.local" EXECUTIONMANAGER_HOST: "autogpt-server-executor.prod-agpt.svc.cluster.local" + DATABASEMANAGER_HOST: "autogpt-server-executor.prod-agpt.svc.cluster.local" secrets: ANTHROPIC_API_KEY: 
"AgCf0CUyhYluMW13zvdScIzF50c1u4P3sUkKZjwe2lJGil/WrxN1r+GQGoLzjMn8ODANV7FiJN2+Y+ilVgpf0tVA9uEWLCL/OguNshRYWfNfU0PCgciXvz+Cy8xILfJW5SIZvZgDV5zMbzXeBomJYq+qFpr+PRyiIzA6ciHK/ZuItcGBB0FMdJ6w2gvAlLTFmAK0ekyXTzYidPEkBp+DA4jJXuzjXGd4U8iC4IcrSs/o0eaqfMQSOBRc7w/6SK+YDUnWypc2awBX4qNwqKbQRYAT59lihy/B0D4BhjjiUb2bAlzNWP0STsJONrOPbnHzuvipm1xpk+1bdYFpkqJAf9rk9GOPAMfB5f/kOdmaoj9jdQN55NIomSzub+KnSGt+m4G6YlEnUf2ZBZTKTeWO1jzk0gnzrdFZclPq/9Dd0qUBsZ/30KjbBRJyL9SexwxpfMoaf6dKJHcsOdOevaCpMQZaQ/AjcFZRtntw8mLALJzTZbTq7Gb6h25blwe1Oi6DrOuTrWT+OMHeUJcDQA3q1rJERa4xV0wLjYraCTerezhZgjMvfRD1Ykm5S+1U9hzsZUZZQS6OEEIS0BaOfYugt3DiFSNLrIUwVcYbl5geLoiMW6oSukEeb4s2AukRqKkMYz8/stjCgJB2NiarVi2NIaDvgaXWLgJxNxxovgtHyS4RR8WpRPdWJdjAs6RH13ve42a35S2m65jvUNg875GSO8Eo1izYH6q2LvJgGmlTfMworP6O2ryZO9tBjNS58UYxM8EqvtXLVktA0TYlK7wlF2NzA/waIMmiOiKJrb8YnQF28ePxYnmQSqqe2ZpwSiDBsDNrzfZvvTk9Ai81qu8=" diff --git a/autogpt_platform/infra/helm/autogpt-websocket-server/values.dev.yaml b/autogpt_platform/infra/helm/autogpt-websocket-server/values.dev.yaml index a977f9bece0c..dd26b32f4030 100644 --- a/autogpt_platform/infra/helm/autogpt-websocket-server/values.dev.yaml +++ b/autogpt_platform/infra/helm/autogpt-websocket-server/values.dev.yaml @@ -1,7 +1,7 @@ replicaCount: 1 # not scaling websocket server for now image: - repository: us-east1-docker.pkg.dev/agpt-dev/agpt-server-dev/agpt-server-dev + repository: us-east1-docker.pkg.dev/agpt-dev/agpt-backend-dev/agpt-backend-dev tag: latest pullPolicy: Always diff --git a/autogpt_platform/infra/terraform/environments/dev.tfvars b/autogpt_platform/infra/terraform/environments/dev.tfvars index 1f72c6d907a0..c193e8399a9a 100644 --- a/autogpt_platform/infra/terraform/environments/dev.tfvars +++ b/autogpt_platform/infra/terraform/environments/dev.tfvars @@ -28,6 +28,10 @@ service_accounts = { "dev-agpt-market-sa" = { display_name = "AutoGPT Dev Market Server Account" description = "Service account for agpt dev market server" + }, + "dev-github-actions-sa" = { + display_name = "GitHub Actions Dev Service Account" + description = "Service account for GitHub Actions deployments to dev" } } @@ -51,6 +55,11 @@ workload_identity_bindings = { service_account_name = "dev-agpt-market-sa" namespace = "dev-agpt" ksa_name = "dev-agpt-market-sa" + }, + "dev-github-actions-workload-identity" = { + service_account_name = "dev-github-actions-sa" + namespace = "dev-agpt" + ksa_name = "dev-github-actions-sa" } } @@ -59,7 +68,8 @@ role_bindings = { "serviceAccount:dev-agpt-server-sa@agpt-dev.iam.gserviceaccount.com", "serviceAccount:dev-agpt-builder-sa@agpt-dev.iam.gserviceaccount.com", "serviceAccount:dev-agpt-ws-server-sa@agpt-dev.iam.gserviceaccount.com", - "serviceAccount:dev-agpt-market-sa@agpt-dev.iam.gserviceaccount.com" + "serviceAccount:dev-agpt-market-sa@agpt-dev.iam.gserviceaccount.com", + "serviceAccount:dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com" ], "roles/cloudsql.client" = [ "serviceAccount:dev-agpt-server-sa@agpt-dev.iam.gserviceaccount.com", @@ -80,7 +90,8 @@ role_bindings = { "serviceAccount:dev-agpt-server-sa@agpt-dev.iam.gserviceaccount.com", "serviceAccount:dev-agpt-builder-sa@agpt-dev.iam.gserviceaccount.com", "serviceAccount:dev-agpt-ws-server-sa@agpt-dev.iam.gserviceaccount.com", - "serviceAccount:dev-agpt-market-sa@agpt-dev.iam.gserviceaccount.com" + "serviceAccount:dev-agpt-market-sa@agpt-dev.iam.gserviceaccount.com", + "serviceAccount:dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com" ] "roles/compute.networkUser" = [ "serviceAccount:dev-agpt-server-sa@agpt-dev.iam.gserviceaccount.com", 
@@ -93,6 +104,16 @@ role_bindings = { "serviceAccount:dev-agpt-builder-sa@agpt-dev.iam.gserviceaccount.com", "serviceAccount:dev-agpt-ws-server-sa@agpt-dev.iam.gserviceaccount.com", "serviceAccount:dev-agpt-market-sa@agpt-dev.iam.gserviceaccount.com" + ], + "roles/artifactregistry.writer" = [ + "serviceAccount:dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com" + ], + "roles/container.viewer" = [ + "serviceAccount:dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com" + ], + "roles/iam.serviceAccountTokenCreator" = [ + "principalSet://iam.googleapis.com/projects/638488734936/locations/global/workloadIdentityPools/dev-pool/*", + "serviceAccount:dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com" ] } @@ -101,4 +122,25 @@ services_ip_cidr_range = "10.2.0.0/20" public_bucket_names = ["website-artifacts"] standard_bucket_names = [] -bucket_admins = ["gcp-devops-agpt@agpt.co", "gcp-developers@agpt.co"] \ No newline at end of file +bucket_admins = ["gcp-devops-agpt@agpt.co", "gcp-developers@agpt.co"] + +workload_identity_pools = { + "dev-pool" = { + display_name = "Development Identity Pool" + providers = { + "github" = { + issuer_uri = "https://token.actions.githubusercontent.com" + attribute_mapping = { + "google.subject" = "assertion.sub" + "attribute.repository" = "assertion.repository" + "attribute.repository_owner" = "assertion.repository_owner" + } + } + } + service_accounts = { + "dev-github-actions-sa" = [ + "Significant-Gravitas/AutoGPT" + ] + } + } +} \ No newline at end of file diff --git a/autogpt_platform/infra/terraform/environments/prod.tfvars b/autogpt_platform/infra/terraform/environments/prod.tfvars index e9351389285a..e75b5b3d45ce 100644 --- a/autogpt_platform/infra/terraform/environments/prod.tfvars +++ b/autogpt_platform/infra/terraform/environments/prod.tfvars @@ -28,6 +28,10 @@ service_accounts = { "prod-agpt-market-sa" = { display_name = "AutoGPT prod Market backend Account" description = "Service account for agpt prod market backend" + }, + "prod-github-actions-sa" = { + display_name = "GitHub Actions Prod Service Account" + description = "Service account for GitHub Actions deployments to prod" } } @@ -59,7 +63,8 @@ role_bindings = { "serviceAccount:prod-agpt-backend-sa@agpt-prod.iam.gserviceaccount.com", "serviceAccount:prod-agpt-frontend-sa@agpt-prod.iam.gserviceaccount.com", "serviceAccount:prod-agpt-ws-backend-sa@agpt-prod.iam.gserviceaccount.com", - "serviceAccount:prod-agpt-market-sa@agpt-prod.iam.gserviceaccount.com" + "serviceAccount:prod-agpt-market-sa@agpt-prod.iam.gserviceaccount.com", + "serviceAccount:prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com" ], "roles/cloudsql.client" = [ "serviceAccount:prod-agpt-backend-sa@agpt-prod.iam.gserviceaccount.com", @@ -80,7 +85,8 @@ role_bindings = { "serviceAccount:prod-agpt-backend-sa@agpt-prod.iam.gserviceaccount.com", "serviceAccount:prod-agpt-frontend-sa@agpt-prod.iam.gserviceaccount.com", "serviceAccount:prod-agpt-ws-backend-sa@agpt-prod.iam.gserviceaccount.com", - "serviceAccount:prod-agpt-market-sa@agpt-prod.iam.gserviceaccount.com" + "serviceAccount:prod-agpt-market-sa@agpt-prod.iam.gserviceaccount.com", + "serviceAccount:prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com" ] "roles/compute.networkUser" = [ "serviceAccount:prod-agpt-backend-sa@agpt-prod.iam.gserviceaccount.com", @@ -93,6 +99,16 @@ role_bindings = { "serviceAccount:prod-agpt-frontend-sa@agpt-prod.iam.gserviceaccount.com", "serviceAccount:prod-agpt-ws-backend-sa@agpt-prod.iam.gserviceaccount.com", 
"serviceAccount:prod-agpt-market-sa@agpt-prod.iam.gserviceaccount.com" + ], + "roles/artifactregistry.writer" = [ + "serviceAccount:prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com" + ], + "roles/container.viewer" = [ + "serviceAccount:prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com" + ], + "roles/iam.serviceAccountTokenCreator" = [ + "principalSet://iam.googleapis.com/projects/1021527134101/locations/global/workloadIdentityPools/prod-pool/*", + "serviceAccount:prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com" ] } @@ -101,4 +117,25 @@ services_ip_cidr_range = "10.2.0.0/20" public_bucket_names = ["website-artifacts"] standard_bucket_names = [] -bucket_admins = ["gcp-devops-agpt@agpt.co", "gcp-developers@agpt.co"] \ No newline at end of file +bucket_admins = ["gcp-devops-agpt@agpt.co", "gcp-developers@agpt.co"] + +workload_identity_pools = { + "prod-pool" = { + display_name = "Production Identity Pool" + providers = { + "github" = { + issuer_uri = "https://token.actions.githubusercontent.com" + attribute_mapping = { + "google.subject" = "assertion.sub" + "attribute.repository" = "assertion.repository" + "attribute.repository_owner" = "assertion.repository_owner" + } + } + } + service_accounts = { + "prod-github-actions-sa" = [ + "Significant-Gravitas/AutoGPT" + ] + } + } +} \ No newline at end of file diff --git a/autogpt_platform/infra/terraform/main.tf b/autogpt_platform/infra/terraform/main.tf index 31049ed40cb9..047ebd59c661 100644 --- a/autogpt_platform/infra/terraform/main.tf +++ b/autogpt_platform/infra/terraform/main.tf @@ -61,6 +61,7 @@ module "iam" { service_accounts = var.service_accounts workload_identity_bindings = var.workload_identity_bindings role_bindings = var.role_bindings + workload_identity_pools = var.workload_identity_pools } module "storage" { diff --git a/autogpt_platform/infra/terraform/modules/iam/main.tf b/autogpt_platform/infra/terraform/modules/iam/main.tf index 3a07d6926456..f632f4c42b43 100644 --- a/autogpt_platform/infra/terraform/modules/iam/main.tf +++ b/autogpt_platform/infra/terraform/modules/iam/main.tf @@ -23,4 +23,31 @@ resource "google_project_iam_binding" "role_bindings" { role = each.key members = each.value +} + +resource "google_iam_workload_identity_pool" "pools" { + for_each = var.workload_identity_pools + workload_identity_pool_id = each.key + display_name = each.value.display_name +} + +resource "google_iam_workload_identity_pool_provider" "providers" { + for_each = merge([ + for pool_id, pool in var.workload_identity_pools : { + for provider_id, provider in pool.providers : + "${pool_id}/${provider_id}" => merge(provider, { + pool_id = pool_id + }) + } + ]...) 
+ + workload_identity_pool_id = split("/", each.key)[0] + workload_identity_pool_provider_id = split("/", each.key)[1] + + attribute_mapping = each.value.attribute_mapping + oidc { + issuer_uri = each.value.issuer_uri + allowed_audiences = each.value.allowed_audiences + } + attribute_condition = "assertion.repository_owner==\"Significant-Gravitas\"" } \ No newline at end of file diff --git a/autogpt_platform/infra/terraform/modules/iam/outputs.tf b/autogpt_platform/infra/terraform/modules/iam/outputs.tf index b503414873a9..19354364bebb 100644 --- a/autogpt_platform/infra/terraform/modules/iam/outputs.tf +++ b/autogpt_platform/infra/terraform/modules/iam/outputs.tf @@ -1,4 +1,14 @@ output "service_account_emails" { description = "The emails of the created service accounts" value = { for k, v in google_service_account.service_accounts : k => v.email } -} \ No newline at end of file +} + +output "workload_identity_pools" { + value = google_iam_workload_identity_pool.pools +} + +output "workload_identity_providers" { + value = { + for k, v in google_iam_workload_identity_pool_provider.providers : k => v.name + } +} diff --git a/autogpt_platform/infra/terraform/modules/iam/variables.tf b/autogpt_platform/infra/terraform/modules/iam/variables.tf index 61637a718783..c9563ea0c7b8 100644 --- a/autogpt_platform/infra/terraform/modules/iam/variables.tf +++ b/autogpt_platform/infra/terraform/modules/iam/variables.tf @@ -26,4 +26,17 @@ variable "role_bindings" { description = "Map of roles to list of members" type = map(list(string)) default = {} +} + +variable "workload_identity_pools" { + type = map(object({ + display_name = string + providers = map(object({ + issuer_uri = string + attribute_mapping = map(string) + allowed_audiences = optional(list(string)) + })) + service_accounts = map(list(string)) # Map of SA to list of allowed principals + })) + default = {} } \ No newline at end of file diff --git a/autogpt_platform/infra/terraform/variables.tf b/autogpt_platform/infra/terraform/variables.tf index 1afd9509fcbc..3b4eb92dff9c 100644 --- a/autogpt_platform/infra/terraform/variables.tf +++ b/autogpt_platform/infra/terraform/variables.tf @@ -130,3 +130,19 @@ variable "bucket_admins" { default = ["gcp-devops-agpt@agpt.co", "gcp-developers@agpt.co"] } +variable "workload_identity_pools" { + type = map(object({ + display_name = string + providers = map(object({ + issuer_uri = string + attribute_mapping = map(string) + allowed_audiences = optional(list(string)) + })) + service_accounts = map(list(string)) + })) + default = {} + description = "Configuration for workload identity pools and their providers" +} + + + diff --git a/autogpt_platform/market/.env.example b/autogpt_platform/market/.env.example index 5522998fc10c..b3b1cb8a4e86 100644 --- a/autogpt_platform/market/.env.example +++ b/autogpt_platform/market/.env.example @@ -9,4 +9,4 @@ ENABLE_AUTH=true SUPABASE_JWT_SECRET=our-super-secret-jwt-token-with-at-least-32-characters-long BACKEND_CORS_ALLOW_ORIGINS="http://localhost:3000,http://127.0.0.1:3000" -APP_ENV=local \ No newline at end of file +APP_ENV=local diff --git a/docs/content/server/new_blocks.md b/docs/content/server/new_blocks.md index 8cc5a5253836..1c7c3684cf7f 100644 --- a/docs/content/server/new_blocks.md +++ b/docs/content/server/new_blocks.md @@ -196,7 +196,7 @@ class BlockWithAPIKeyAndOAuth(Block): The credentials will be automagically injected by the executor in the back end. 
-The `APIKeyCredentials` and `OAuth2Credentials` models are defined [here](https://github.com/Significant-Gravitas/AutoGPT/blob/master/rnd/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py). +The `APIKeyCredentials` and `OAuth2Credentials` models are defined [here](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py). To use them in e.g. an API request, you can either access the token directly: ```python
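# Completion sketch for the truncated snippet above (assumed, not verbatim
# from the docs): APIKeyCredentials stores the key as a pydantic SecretStr,
# so it has to be unwrapped with get_secret_value() before use. `url` and
# `credentials` are taken from the surrounding example's context.
response = requests.post(
    url,
    headers={
        "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
    },
)
```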