From 686c00a9666f254a75010cc2e96adf1bce822896 Mon Sep 17 00:00:00 2001 From: Victor <49424955+vbourgin@users.noreply.github.com> Date: Wed, 22 Jan 2025 16:34:00 -0800 Subject: [PATCH] Ignore `pin_memory` if cuda is not available Differential Revision: D68357863 Pull Request resolved: https://github.com/facebookresearch/spdl/pull/331 --- src/spdl/dataloader/_pytorch_dataloader.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/spdl/dataloader/_pytorch_dataloader.py b/src/spdl/dataloader/_pytorch_dataloader.py index 6ade3893..93934162 100644 --- a/src/spdl/dataloader/_pytorch_dataloader.py +++ b/src/spdl/dataloader/_pytorch_dataloader.py @@ -323,7 +323,15 @@ def get_pytorch_dataloader( from torch.utils.data._utils.pin_memory import pin_memory as pin_memory_fn - transfer_fn = pin_memory_fn if pin_memory else None + if pin_memory and not torch.accelerator.is_available(): + _LG.warning( + "'pin_memory' is set to True, but no accelerator is available; " + "pinned memory will not be used." + ) + + transfer_fn = ( + pin_memory_fn if pin_memory and torch.accelerator.is_available() else None + ) mp_ctx = ( multiprocessing_context