diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py
index 787683623313..6a4bb354fc87 100644
--- a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py
@@ -6,7 +6,7 @@
     from redis import Redis
     from backend.executor.database import DatabaseManager
 
-from autogpt_libs.utils.cache import thread_cached_property
+from autogpt_libs.utils.cache import thread_cached
 from autogpt_libs.utils.synchronize import RedisKeyedMutex
 
 from .types import (
@@ -21,8 +21,9 @@ class SupabaseIntegrationCredentialsStore:
 
     def __init__(self, redis: "Redis"):
         self.locks = RedisKeyedMutex(redis)
-
-    @thread_cached_property
+
+    @property
+    @thread_cached
     def db_manager(self) -> "DatabaseManager":
         from backend.executor.database import DatabaseManager
         from backend.util.service import get_service_client
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py b/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py
index b4506dda47b8..9c69da9411e2 100644
--- a/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py
@@ -1,8 +1,6 @@
+from typing import Callable, TypeVar, ParamSpec
 import threading
-from functools import wraps
-from typing import Callable, ParamSpec, TypeVar
 
-T = TypeVar("T")
 P = ParamSpec("P")
 R = TypeVar("R")
 
@@ -10,7 +8,6 @@
 def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
     thread_local = threading.local()
 
-    @wraps(func)
     def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
         cache = getattr(thread_local, "cache", None)
         if cache is None:
@@ -21,7 +18,3 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
         return cache[key]
 
     return wrapper
-
-
-def thread_cached_property(func: Callable[[T], R]) -> property:
-    return property(thread_cached(func))
diff --git a/autogpt_platform/backend/backend/executor/scheduler.py b/autogpt_platform/backend/backend/executor/scheduler.py
index 979631c0585e..5080e16031bf 100644
--- a/autogpt_platform/backend/backend/executor/scheduler.py
+++ b/autogpt_platform/backend/backend/executor/scheduler.py
@@ -4,7 +4,7 @@
 from apscheduler.schedulers.background import BackgroundScheduler
 from apscheduler.triggers.cron import CronTrigger
 
-from autogpt_libs.utils.cache import thread_cached_property
+from autogpt_libs.utils.cache import thread_cached
 
 from backend.data.block import BlockInput
 from backend.data.schedule import (
@@ -37,7 +37,8 @@ def __init__(self, refresh_interval=10):
     def get_port(cls) -> int:
         return Config().execution_scheduler_port
 
-    @thread_cached_property
+    @property
+    @thread_cached
     def execution_client(self) -> ExecutionManager:
         return get_service_client(ExecutionManager)
 
diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py
index c908ad9fb55a..8c3ed3dcba14 100644
--- a/autogpt_platform/backend/backend/server/rest_api.py
+++ b/autogpt_platform/backend/backend/server/rest_api.py
@@ -8,7 +8,7 @@
 
 import uvicorn
 from autogpt_libs.auth.middleware import auth_middleware
-from autogpt_libs.utils.cache import thread_cached_property
+from autogpt_libs.utils.cache import thread_cached
 from fastapi import APIRouter, Body, Depends, FastAPI, HTTPException, Request
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import JSONResponse
@@ -308,11 +308,13 @@ async def wrapper(*args, **kwargs):
 
         return wrapper
 
-    @thread_cached_property
+    @property
+    @thread_cached
     def execution_manager_client(self) -> ExecutionManager:
         return get_service_client(ExecutionManager)
 
-    @thread_cached_property
+    @property
+    @thread_cached
     def execution_scheduler_client(self) -> ExecutionScheduler:
         return get_service_client(ExecutionScheduler)
 
@@ -541,7 +543,7 @@ async def stop_graph_run(
             )
 
         await asyncio.to_thread(
-            self.execution_manager_client.cancel_execution(graph_exec_id)
+            lambda: self.execution_manager_client.cancel_execution(graph_exec_id)
         )
 
         # Retrieve & return canceled graph execution in its final state
@@ -617,11 +619,15 @@ async def create_schedule(
         graph = await graph_db.get_graph(graph_id, user_id=user_id)
         if not graph:
             raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
-        execution_scheduler = self.execution_scheduler_client
+
        return {
             "id": await asyncio.to_thread(
-                execution_scheduler.add_execution_schedule(
-                    graph_id, graph.version, cron, input_data, user_id=user_id
+                lambda: self.execution_scheduler_client.add_execution_schedule(
+                    graph_id=graph_id,
+                    graph_version=graph.version,
+                    cron=cron,
+                    input_data=input_data,
+                    user_id=user_id,
                 )
             )
         }