diff --git a/docs/source/examples/single_use_workers.py b/docs/source/examples/single_use_workers.py
index 6b8d53c6..5a379073 100644
--- a/docs/source/examples/single_use_workers.py
+++ b/docs/source/examples/single_use_workers.py
@@ -31,10 +31,10 @@ async def amain():
                 nursery.start_soon(ctx.run_sync, worker, i)
 
     print("dual use worker behavior:")
-    async with trio_parallel.open_worker_context(retire=after_dual_use) as ctx:
+    async with trio_parallel.cache_scope(retire=after_dual_use):
         async with trio.open_nursery() as nursery:
             for i in range(10):
-                nursery.start_soon(ctx.run_sync, worker, i)
+                nursery.start_soon(trio_parallel.run_sync, worker, i)
 
     print("default behavior:")
     async with trio.open_nursery() as nursery:
diff --git a/docs/source/reference.rst b/docs/source/reference.rst
index 11daedb5..e9f4e900 100644
--- a/docs/source/reference.rst
+++ b/docs/source/reference.rst
@@ -138,6 +138,15 @@ lifetime is required in a subset of your application.
 .. autoclass:: WorkerContext()
    :members:
 
+Alternatively, you can implicitly override the default context of :func:`run_sync`
+in any subset of the task tree using `cache_scope()`. This async context manager
+sets an internal TreeVar_ so that the current task and all nested subtasks operate
+using an internal, isolated `WorkerContext`, without having to manually pass a
+context object around.
+
+.. autofunction:: cache_scope
+   :async-with: ctx
+
 One typical use case for configuring workers is to set a policy for taking a worker
 out of service. For this, use the ``retire`` argument. This example shows how to
 build (trivial) stateless and stateful worker retirement policies.
@@ -145,11 +154,11 @@ build (trivial) stateless and stateful worker retirement policies.
 .. literalinclude:: examples/single_use_workers.py
 
 A more realistic use-case might examine the worker process's memory usage (e.g. with
-`psutil <https://psutil.readthedocs.io/en/latest/>`_) and retire if usage is too high.
+psutil_) and retire if usage is too high.
 
 If you are retiring workers frequently, like in the single-use case, a large amount
-of process startup overhead will be incurred with the default worker type. If your
-platform supports it, an alternate `WorkerType` might cut that overhead down.
+of process startup overhead will be incurred with the default "spawn" worker type.
+If your platform supports it, an alternate `WorkerType` might cut that overhead down.
 
 .. autoclass:: WorkerType()
 
@@ -161,4 +170,6 @@ You probably won't use these... but create an issue if you do and need help!
 .. autofunction:: default_context_statistics
 
 .. _cloudpickle: https://github.com/cloudpipe/cloudpickle
+.. _psutil: https://psutil.readthedocs.io/en/latest/
 .. _service: https://github.com/richardsheridan/trio-parallel/issues/348
+.. _TreeVar: https://tricycle.readthedocs.io/en/latest/reference.html#tricycle.TreeVar
diff --git a/trio_parallel/__init__.py b/trio_parallel/__init__.py
index 9b437753..a4fd4403 100644
--- a/trio_parallel/__init__.py
+++ b/trio_parallel/__init__.py
@@ -3,6 +3,7 @@
 from ._impl import (
     run_sync,
     open_worker_context,
+    cache_scope,
     WorkerContext,
     WorkerType,
     current_default_worker_limiter,
diff --git a/trio_parallel/_impl.py b/trio_parallel/_impl.py
index 59187f61..6cb349b8 100644
--- a/trio_parallel/_impl.py
+++ b/trio_parallel/_impl.py
@@ -390,6 +390,8 @@ async def cache_scope(
     worker_type=WorkerType.SPAWN,
 ):
     """
+    Override the configuration of `trio_parallel.run_sync()` in this and all subtasks.
+
     The context will automatically wait for any running workers to become idle when
     exiting the scope. Since this wait cannot be cancelled, it is more convenient to
     only pass the context object to tasks that cannot outlive the scope, for example,
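A minimal usage sketch of the `cache_scope()` API introduced in this diff, mirroring the `single_use_workers.py` change above; the `worker` function here is a placeholder and not part of the patch:

```python
import trio
import trio_parallel


def worker(i):
    # Placeholder CPU-bound job; any picklable, synchronous callable works.
    return i * i


async def amain():
    # Within this scope, run_sync dispatches to an isolated worker cache
    # configured here, instead of the module-level default context.
    async with trio_parallel.cache_scope():
        async with trio.open_nursery() as nursery:
            for i in range(10):
                nursery.start_soon(trio_parallel.run_sync, worker, i)
    # Exiting the scope waits for the scoped workers to become idle.


if __name__ == "__main__":
    trio.run(amain)
```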