diff --git a/docs/source/engines.rst b/docs/source/engines.rst index 6d2694124b..afb2682822 100644 --- a/docs/source/engines.rst +++ b/docs/source/engines.rst @@ -5,12 +5,6 @@ Engines ======= -Multi-GPU data parallel ------------------------ - -.. automodule:: monai.engines.multi_gpu_supervised_trainer - :members: - Workflows --------- diff --git a/docs/source/metrics.rst b/docs/source/metrics.rst index 5aab94c791..616f0fe385 100644 --- a/docs/source/metrics.rst +++ b/docs/source/metrics.rst @@ -61,7 +61,7 @@ Metrics `Mean IoU` ---------- -.. autofunction:: compute_meaniou +.. autofunction:: compute_iou .. autoclass:: MeanIoU :members: diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index b35fa5d585..5aabdcdc33 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -996,24 +996,12 @@ Utility :members: :special-members: __call__ -`AsChannelFirst` -"""""""""""""""" -.. autoclass:: AsChannelFirst - :members: - :special-members: __call__ - `AsChannelLast` """"""""""""""" .. autoclass:: AsChannelLast :members: :special-members: __call__ -`AddChannel` -"""""""""""" -.. autoclass:: AddChannel - :members: - :special-members: __call__ - `EnsureChannelFirst` """""""""""""""""""" .. autoclass:: EnsureChannelFirst @@ -1032,12 +1020,6 @@ Utility :members: :special-members: __call__ -`SplitChannel` -"""""""""""""" -.. autoclass:: SplitChannel - :members: - :special-members: __call__ - `CastToType` """""""""""" .. autoclass:: CastToType @@ -1983,24 +1965,12 @@ Utility (Dict) :members: :special-members: __call__ -`AsChannelFirstd` -""""""""""""""""" -.. autoclass:: AsChannelFirstd - :members: - :special-members: __call__ - `AsChannelLastd` """""""""""""""" .. autoclass:: AsChannelLastd :members: :special-members: __call__ -`AddChanneld` -""""""""""""" -.. autoclass:: AddChanneld - :members: - :special-members: __call__ - `EnsureChannelFirstd` """"""""""""""""""""" .. autoclass:: EnsureChannelFirstd @@ -2019,12 +1989,6 @@ Utility (Dict) :members: :special-members: __call__ -`SplitChanneld` -""""""""""""""" -.. autoclass:: SplitChanneld - :members: - :special-members: __call__ - `CastToTyped` """"""""""""" .. 
autoclass:: CastToTyped diff --git a/monai/apps/auto3dseg/ensemble_builder.py b/monai/apps/auto3dseg/ensemble_builder.py index 5d7b96e4ba..e29745e5cf 100644 --- a/monai/apps/auto3dseg/ensemble_builder.py +++ b/monai/apps/auto3dseg/ensemble_builder.py @@ -36,7 +36,7 @@ from monai.bundle import ConfigParser from monai.data import partition_dataset from monai.transforms import MeanEnsemble, SaveImage, VoteEnsemble -from monai.utils import RankFilter, deprecated_arg +from monai.utils import RankFilter from monai.utils.enums import AlgoKeys from monai.utils.misc import check_kwargs_exist_in_class_init, prob2class from monai.utils.module import look_up_option, optional_import @@ -332,13 +332,6 @@ class AlgoEnsembleBuilder: """ - @deprecated_arg( - "data_src_cfg_filename", - since="1.2", - removed="1.3", - new_name="data_src_cfg_name", - msg_suffix="please use `data_src_cfg_name` instead.", - ) def __init__(self, history: Sequence[dict[str, Any]], data_src_cfg_name: str | None = None): self.infer_algos: list[dict[AlgoKeys, Any]] = [] self.ensemble: AlgoEnsemble diff --git a/monai/apps/deepgrow/transforms.py b/monai/apps/deepgrow/transforms.py index 6b9894227c..ce9cd2c5b7 100644 --- a/monai/apps/deepgrow/transforms.py +++ b/monai/apps/deepgrow/transforms.py @@ -441,8 +441,8 @@ def __call__(self, data): if np.all(np.less(current_size, self.spatial_size)): cropper = SpatialCrop(roi_center=center, roi_size=self.spatial_size) - box_start = np.array([s.start for s in cropper.slices]) - box_end = np.array([s.stop for s in cropper.slices]) + box_start = np.array([s.start for s in cropper.slices]) # type: ignore[assignment] + box_end = np.array([s.stop for s in cropper.slices]) # type: ignore[assignment] else: cropper = SpatialCrop(roi_start=box_start, roi_end=box_end) @@ -492,9 +492,6 @@ class AddGuidanceFromPointsd(Transform): For example, to handle key `image`, read/write affine matrices from the metadata `image_meta_dict` dictionary's `affine` field. - .. deprecated:: 0.6.0 - ``dimensions`` is deprecated, use ``spatial_dims`` instead. - """ def __init__( diff --git a/monai/bundle/scripts.py b/monai/bundle/scripts.py index 5ab541a625..f81b7e7629 100644 --- a/monai/bundle/scripts.py +++ b/monai/bundle/scripts.py @@ -704,7 +704,6 @@ def get_bundle_info( return bundle_info[version] # type: ignore[no-any-return] -@deprecated_arg("runner_id", since="1.1", removed="1.3", new_name="run_id", msg_suffix="please use `run_id` instead.") def run( run_id: str | None = None, init_id: str | None = None, @@ -766,7 +765,7 @@ def run( will patch the target config content with `tracking handlers` and the top-level items of `configs`. for detailed usage examples, please check the tutorial: https://github.com/Project-MONAI/tutorials/blob/main/experiment_management/bundle_integrate_mlflow.ipynb. - args_file: a JSON or YAML file to provide default values for `runner_id`, `meta_file`, + args_file: a JSON or YAML file to provide default values for `run_id`, `meta_file`, `config_file`, `logging`, and override pairs. so that the command line inputs can be simplified. override: id-value pairs to override or add the corresponding config content. e.g. ``--net#input_chns 42``, ``--net %/data/other.json#net_arg``. diff --git a/monai/data/dataset.py b/monai/data/dataset.py index 4f2061426e..5e403d6fdb 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -1482,9 +1482,6 @@ class CSVDataset(Dataset): kwargs_read_csv: dictionary args to pass to pandas `read_csv` function. 
kwargs: additional arguments for `pandas.merge()` API to join tables. - .. deprecated:: 0.8.0 - ``filename`` is deprecated, use ``src`` instead. - """ def __init__( diff --git a/monai/data/grid_dataset.py b/monai/data/grid_dataset.py index 43c72b5a78..06954e9f11 100644 --- a/monai/data/grid_dataset.py +++ b/monai/data/grid_dataset.py @@ -185,9 +185,6 @@ class GridPatchDataset(IterableDataset): transform: a callable data transform operates on the patches. with_coordinates: whether to yield the coordinates of each patch, default to `True`. - .. deprecated:: 0.8.0 - ``dataset`` is deprecated, use ``data`` instead. - """ def __init__( @@ -253,9 +250,6 @@ class PatchDataset(Dataset): >>> torch.Size([2, 1, 3, 3]) - .. deprecated:: 0.8.0 - ``dataset`` is deprecated, use ``data`` instead. - """ def __init__( diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py index 3ae4066eae..ce77c549ce 100644 --- a/monai/data/image_reader.py +++ b/monai/data/image_reader.py @@ -23,7 +23,7 @@ import numpy as np from torch.utils.data._utils.collate import np_str_obj_array_pattern -from monai.config import DtypeLike, KeysCollection, PathLike +from monai.config import KeysCollection, PathLike from monai.data.utils import ( affine_to_spacing, correct_nifti_header_if_necessary, @@ -31,7 +31,7 @@ is_supported_format, orientation_ras_lps, ) -from monai.utils import MetaKeys, SpaceKeys, TraceKeys, deprecated_arg, ensure_tuple, optional_import, require_pkg +from monai.utils import MetaKeys, SpaceKeys, TraceKeys, ensure_tuple, optional_import, require_pkg if TYPE_CHECKING: import itk @@ -861,20 +861,17 @@ class NibabelReader(ImageReader): """ - @deprecated_arg("dtype", since="1.0", msg_suffix="please modify dtype of the returned by ``get_data`` instead.") def __init__( self, channel_dim: str | int | None = None, as_closest_canonical: bool = False, squeeze_non_spatial_dims: bool = False, - dtype: DtypeLike = np.float32, **kwargs, ): super().__init__() self.channel_dim = float("nan") if channel_dim == "no_channel" else channel_dim self.as_closest_canonical = as_closest_canonical self.squeeze_non_spatial_dims = squeeze_non_spatial_dims - self.dtype = dtype # deprecated self.kwargs = kwargs def verify_suffix(self, filename: Sequence[PathLike] | PathLike) -> bool: diff --git a/monai/data/iterable_dataset.py b/monai/data/iterable_dataset.py index 4c476b2f9d..3ee70bf69e 100644 --- a/monai/data/iterable_dataset.py +++ b/monai/data/iterable_dataset.py @@ -195,9 +195,6 @@ class CSVIterableDataset(IterableDataset): kwargs_read_csv: dictionary args to pass to pandas `read_csv` function. Default to ``{"chunksize": chunksize}``. kwargs: additional arguments for `pandas.merge()` API to join tables. - .. deprecated:: 0.8.0 - ``filename`` is deprecated, use ``src`` instead. 
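With the `filename` keyword gone, `CSVDataset` and `CSVIterableDataset` accept their table(s) only via `src`. A minimal sketch of the surviving API; the tiny CSV written below is illustrative scaffolding so the snippet runs, not part of this patch:

```python
import pandas as pd

from monai.data import CSVDataset, CSVIterableDataset

# write a tiny table so the snippet is self-contained
pd.DataFrame({"subject_id": ["s1", "s2"], "label": [0, 1]}).to_csv("demo.csv", index=False)

# `src` takes file path(s) or pre-loaded DataFrame(s); the removed `filename` kwarg is no longer accepted
ds = CSVDataset(src="demo.csv")
it_ds = CSVIterableDataset(src="demo.csv", chunksize=1, shuffle=False)

print(ds[0])
print(next(iter(it_ds)))
```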
- """ def __init__( @@ -223,8 +220,6 @@ def __init__( self.shuffle = shuffle self.seed = seed self.kwargs_read_csv = kwargs_read_csv or {"chunksize": chunksize} - # in case treating deprecated arg `filename` as kwargs, remove it from `kwargs` - kwargs.pop("filename", None) self.kwargs = kwargs self.iters: list[Iterable] = self.reset() diff --git a/monai/engines/__init__.py b/monai/engines/__init__.py index 9e425ccbe2..d8dc51f620 100644 --- a/monai/engines/__init__.py +++ b/monai/engines/__init__.py @@ -12,7 +12,6 @@ from __future__ import annotations from .evaluator import EnsembleEvaluator, Evaluator, SupervisedEvaluator -from .multi_gpu_supervised_trainer import create_multigpu_supervised_evaluator, create_multigpu_supervised_trainer from .trainer import GanTrainer, SupervisedTrainer, Trainer from .utils import ( IterationEvents, diff --git a/monai/engines/evaluator.py b/monai/engines/evaluator.py index 7c6ddd5bdd..736cde8b88 100644 --- a/monai/engines/evaluator.py +++ b/monai/engines/evaluator.py @@ -22,7 +22,7 @@ from monai.inferers import Inferer, SimpleInferer from monai.networks.utils import eval_mode, train_mode from monai.transforms import Transform -from monai.utils import ForwardMode, deprecated, ensure_tuple, min_version, optional_import +from monai.utils import ForwardMode, ensure_tuple, min_version, optional_import from monai.utils.enums import CommonKeys as Keys from monai.utils.enums import EngineStatsKeys as ESKeys from monai.utils.module import look_up_option @@ -167,10 +167,6 @@ def get_stats(self, *vars): stats[k] = getattr(self.state, k, None) return stats - @deprecated(since="0.9", msg_suffix="please use the `get_stats()` API instead.") - def get_validation_stats(self): - return {"best_validation_metric": self.state.best_metric, "best_validation_epoch": self.state.best_metric_epoch} - class SupervisedEvaluator(Evaluator): """ diff --git a/monai/engines/multi_gpu_supervised_trainer.py b/monai/engines/multi_gpu_supervised_trainer.py deleted file mode 100644 index cb6bbe86cd..0000000000 --- a/monai/engines/multi_gpu_supervised_trainer.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (c) MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import annotations - -from collections.abc import Callable, Sequence -from typing import TYPE_CHECKING - -import torch.nn -from torch.nn.parallel import DataParallel, DistributedDataParallel -from torch.optim.optimizer import Optimizer - -from monai.config import IgniteInfo -from monai.engines.utils import get_devices_spec -from monai.utils import deprecated, min_version, optional_import - -create_supervised_trainer, _ = optional_import( - "ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "create_supervised_trainer" -) -create_supervised_evaluator, _ = optional_import( - "ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "create_supervised_evaluator" -) -_prepare_batch, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "_prepare_batch") -if TYPE_CHECKING: - from ignite.engine import Engine - from ignite.metrics import Metric -else: - Engine, _ = optional_import( - "ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine", as_type="decorator" - ) - Metric, _ = optional_import( - "ignite.metrics", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Metric", as_type="decorator" - ) - -__all__ = ["create_multigpu_supervised_trainer", "create_multigpu_supervised_evaluator"] - - -def _default_transform(_x: torch.Tensor, _y: torch.Tensor, _y_pred: torch.Tensor, loss: torch.Tensor) -> float: - return loss.item() - - -def _default_eval_transform( - x: torch.Tensor, y: torch.Tensor, y_pred: torch.Tensor -) -> tuple[torch.Tensor, torch.Tensor]: - return y_pred, y - - -@deprecated( - since="1.1", - removed="1.3", - msg_suffix=("Native ignite engine lacks support of many MONAI features, please use `SupervisedTrainer` instead."), -) -def create_multigpu_supervised_trainer( - net: torch.nn.Module, - optimizer: Optimizer, - loss_fn: Callable, - devices: Sequence[str | torch.device] | None = None, - non_blocking: bool = False, - prepare_batch: Callable = _prepare_batch, - output_transform: Callable = _default_transform, - distributed: bool = False, -) -> Engine: - """ - Derived from `create_supervised_trainer` in Ignite. - - Factory function for creating a trainer for supervised models. - - Args: - net: the network to train. - optimizer: the optimizer to use. - loss_fn: the loss function to use. - devices: device(s) type specification (default: None). - Applies to both model and batches. None is all devices used, empty list is CPU only. - non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously - with respect to the host. For other cases, this argument has no effect. - prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs - tuple of tensors `(batch_x, batch_y)`. - output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value - to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`. - distributed: whether convert model to `DistributedDataParallel`, if `True`, `devices` must contain - only 1 GPU or CPU for current distributed rank. - - Returns: - Engine: a trainer engine with supervised update function. - - Note: - `engine.state.output` for this engine is defined by `output_transform` parameter and is the loss - of the processed batch by default. 
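The deprecation message on this deleted factory pointed users at `SupervisedTrainer`. A rough, hedged equivalent of the removed multi-GPU helper, with the `DataParallel` wrapping now done by the caller; the toy dataset, network and optimizer below are placeholders, and `pytorch-ignite` is still required just as it was for the removed module:

```python
import torch
from torch.utils.data import DataLoader, Dataset

from monai.engines import SupervisedTrainer


class _ToyDataset(Dataset):
    def __len__(self):
        return 4

    def __getitem__(self, index):
        return {"image": torch.rand(1, 16, 16), "label": torch.rand(1, 16, 16)}


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = torch.nn.Conv2d(1, 1, 3, padding=1).to(device)
if torch.cuda.device_count() > 1:
    net = torch.nn.DataParallel(net)  # the removed factory wrapped the model internally

trainer = SupervisedTrainer(
    device=device,
    max_epochs=2,
    train_data_loader=DataLoader(_ToyDataset(), batch_size=2),
    network=net,
    optimizer=torch.optim.Adam(net.parameters(), 1e-3),
    loss_function=torch.nn.MSELoss(),
)
trainer.run()

# the removed get_train_stats()/get_validation_stats() helpers map onto get_stats()
print(trainer.get_stats("best_metric", "best_metric_epoch"))
```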
- """ - - devices_ = get_devices_spec(devices) - if distributed: - if len(devices_) > 1: - raise ValueError(f"for distributed training, `devices` must contain only 1 GPU or CPU, but got {devices_}.") - net = DistributedDataParallel(net, device_ids=devices_) - elif len(devices_) > 1: - net = DataParallel(net) - - return create_supervised_trainer( # type: ignore[no-any-return] - model=net, - optimizer=optimizer, - loss_fn=loss_fn, - device=devices_[0], - non_blocking=non_blocking, - prepare_batch=prepare_batch, - output_transform=output_transform, - ) - - -@deprecated( - since="1.1", - removed="1.3", - msg_suffix=( - "Native ignite evaluator lacks support of many MONAI features, please use `SupervisedEvaluator` instead." - ), -) -def create_multigpu_supervised_evaluator( - net: torch.nn.Module, - metrics: dict[str, Metric] | None = None, - devices: Sequence[str | torch.device] | None = None, - non_blocking: bool = False, - prepare_batch: Callable = _prepare_batch, - output_transform: Callable = _default_eval_transform, - distributed: bool = False, -) -> Engine: - """ - Derived from `create_supervised_evaluator` in Ignite. - - Factory function for creating an evaluator for supervised models. - - Args: - net: the model to train. - metrics: a map of metric names to Metrics. - devices: device(s) type specification (default: None). - Applies to both model and batches. None is all devices used, empty list is CPU only. - non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously - with respect to the host. For other cases, this argument has no effect. - prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs - tuple of tensors `(batch_x, batch_y)`. - output_transform: function that receives 'x', 'y', 'y_pred' and returns value - to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` - which fits output expected by metrics. If you change it you should use `output_transform` in metrics. - distributed: whether convert model to `DistributedDataParallel`, if `True`, `devices` must contain - only 1 GPU or CPU for current distributed rank. - - Note: - `engine.state.output` for this engine is defined by `output_transform` parameter and is - a tuple of `(batch_pred, batch_y)` by default. - - Returns: - Engine: an evaluator engine with supervised inference function. - """ - - devices_ = get_devices_spec(devices) - - if distributed: - net = DistributedDataParallel(net, device_ids=devices_) - if len(devices_) > 1: - raise ValueError( - f"for distributed evaluation, `devices` must contain only 1 GPU or CPU, but got {devices_}." 
- ) - elif len(devices_) > 1: - net = DataParallel(net) - - return create_supervised_evaluator( # type: ignore[no-any-return] - model=net, - metrics=metrics, - device=devices_[0], - non_blocking=non_blocking, - prepare_batch=prepare_batch, - output_transform=output_transform, - ) diff --git a/monai/engines/trainer.py b/monai/engines/trainer.py index b18050fd09..61b7028e11 100644 --- a/monai/engines/trainer.py +++ b/monai/engines/trainer.py @@ -22,7 +22,7 @@ from monai.engines.workflow import Workflow from monai.inferers import Inferer, SimpleInferer from monai.transforms import Transform -from monai.utils import GanKeys, deprecated, min_version, optional_import +from monai.utils import GanKeys, min_version, optional_import from monai.utils.enums import CommonKeys as Keys from monai.utils.enums import EngineStatsKeys as ESKeys @@ -74,10 +74,6 @@ def get_stats(self, *vars): stats[k] = getattr(self.state, k, None) return stats - @deprecated(since="0.9", msg_suffix="please use the `get_stats()` API instead.") - def get_train_stats(self): - return self.get_stats() - class SupervisedTrainer(Trainer): """ diff --git a/monai/handlers/mean_iou.py b/monai/handlers/mean_iou.py index 894a9185a3..69f11a8749 100644 --- a/monai/handlers/mean_iou.py +++ b/monai/handlers/mean_iou.py @@ -48,7 +48,7 @@ def __init__( default to True, will save to `engine.state.metric_details` dict with the metric name as key. See also: - :py:meth:`monai.metrics.meaniou.compute_meaniou` + :py:meth:`monai.metrics.meaniou.compute_iou` """ metric_fn = MeanIoU(include_background=include_background, reduction=reduction) super().__init__(metric_fn=metric_fn, output_transform=output_transform, save_details=save_details) diff --git a/monai/handlers/stats_handler.py b/monai/handlers/stats_handler.py index 4e4ad78798..c49fcda819 100644 --- a/monai/handlers/stats_handler.py +++ b/monai/handlers/stats_handler.py @@ -20,7 +20,7 @@ from monai.apps import get_logger from monai.config import IgniteInfo -from monai.utils import deprecated_arg_default, is_scalar, min_version, optional_import +from monai.utils import is_scalar, min_version, optional_import Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events") if TYPE_CHECKING: @@ -66,7 +66,6 @@ class StatsHandler: """ - @deprecated_arg_default("name", old_default=None, new_default="StatsHandler", since="1.1", replaced="1.3") def __init__( self, iteration_log: bool | Callable[[Engine, int], bool] = True, @@ -76,7 +75,7 @@ def __init__( output_transform: Callable = lambda x: x[0], global_epoch_transform: Callable = lambda x: x, state_attributes: Sequence[str] | None = None, - name: str | None = None, + name: str | None = "StatsHandler", tag_name: str = DEFAULT_TAG, key_var_format: str = DEFAULT_KEY_VAL_FORMAT, ) -> None: diff --git a/monai/handlers/tensorboard_handlers.py b/monai/handlers/tensorboard_handlers.py index 9b80f45a41..7b7e3968fb 100644 --- a/monai/handlers/tensorboard_handlers.py +++ b/monai/handlers/tensorboard_handlers.py @@ -19,7 +19,7 @@ import torch from monai.config import IgniteInfo -from monai.utils import deprecated_arg, is_scalar, min_version, optional_import +from monai.utils import is_scalar, min_version, optional_import from monai.visualize import plot_2d_or_3d_image Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events") @@ -87,8 +87,6 @@ class TensorBoardStatsHandler(TensorBoardHandler): """ - @deprecated_arg("epoch_interval", since="1.1", removed="1.3") - 
@deprecated_arg("iteration_interval", since="1.1", removed="1.3") def __init__( self, summary_writer: SummaryWriter | SummaryWriterX | None = None, @@ -96,9 +94,7 @@ def __init__( iteration_log: bool | Callable[[Engine, int], bool] | int = True, epoch_log: bool | Callable[[Engine, int], bool] | int = True, epoch_event_writer: Callable[[Engine, Any], Any] | None = None, - epoch_interval: int = 1, iteration_event_writer: Callable[[Engine, Any], Any] | None = None, - iteration_interval: int = 1, output_transform: Callable = lambda x: x[0], global_epoch_transform: Callable = lambda x: x, state_attributes: Sequence[str] | None = None, @@ -120,12 +116,8 @@ def __init__( See ``iteration_log`` argument for more details. epoch_event_writer: customized callable TensorBoard writer for epoch level. Must accept parameter "engine" and "summary_writer", use default event writer if None. - epoch_interval: the epoch interval at which the epoch_event_writer is called. Defaults to 1. - ``epoch_interval`` must be 1 if ``epoch_log`` is callable. iteration_event_writer: customized callable TensorBoard writer for iteration level. Must accept parameter "engine" and "summary_writer", use default event writer if None. - iteration_interval: the iteration interval at which the iteration_event_writer is called. Defaults to 1. - ``iteration_interval`` must be 1 if ``iteration_log`` is callable. output_transform: a callable that is used to transform the ``ignite.engine.state.output`` into a scalar to plot, or a dictionary of {key: scalar}. In the latter case, the output string will be formatted as key: value. @@ -142,19 +134,12 @@ def __init__( when epoch completed. tag_name: when iteration output is a scalar, tag_name is used to plot, defaults to ``'Loss'``. """ - if callable(iteration_log) and iteration_interval > 1: - raise ValueError("If iteration_log is callable, then iteration_interval should be 1") - - if callable(epoch_log) and epoch_interval > 1: - raise ValueError("If epoch_log is callable, then epoch_interval should be 1") super().__init__(summary_writer=summary_writer, log_dir=log_dir) self.iteration_log = iteration_log self.epoch_log = epoch_log self.epoch_event_writer = epoch_event_writer - self.epoch_interval = epoch_interval self.iteration_event_writer = iteration_event_writer - self.iteration_interval = iteration_interval self.output_transform = output_transform self.global_epoch_transform = global_epoch_transform self.state_attributes = state_attributes @@ -172,15 +157,15 @@ def attach(self, engine: Engine) -> None: event = Events.ITERATION_COMPLETED if callable(self.iteration_log): # substitute event with new one using filter callable event = event(event_filter=self.iteration_log) - elif self.iteration_interval > 1: - event = event(every=self.iteration_interval) + elif self.iteration_log > 1: + event = event(every=self.iteration_log) engine.add_event_handler(event, self.iteration_completed) if self.epoch_log and not engine.has_event_handler(self.epoch_completed, Events.EPOCH_COMPLETED): event = Events.EPOCH_COMPLETED if callable(self.epoch_log): # substitute event with new one using filter callable event = event(event_filter=self.epoch_log) elif self.epoch_log > 1: - event = event(every=self.epoch_interval) + event = event(every=self.epoch_log) engine.add_event_handler(event, self.epoch_completed) def epoch_completed(self, engine: Engine) -> None: diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py index a74f303ec6..d8b2c33e62 100644 --- a/monai/losses/contrastive.py +++ 
b/monai/losses/contrastive.py @@ -39,10 +39,6 @@ def __init__(self, temperature: float = 0.5, batch_size: int = -1) -> None: ValueError: When an input of dimension length > 2 is passed ValueError: When input and target are of different shapes - .. deprecated:: 0.8.0 - - `reduction` is no longer supported. - """ super().__init__() self.temperature = temperature diff --git a/monai/metrics/__init__.py b/monai/metrics/__init__.py index 3809f59d2d..201acdfa50 100644 --- a/monai/metrics/__init__.py +++ b/monai/metrics/__init__.py @@ -21,7 +21,7 @@ from .hausdorff_distance import HausdorffDistanceMetric, compute_hausdorff_distance, compute_percent_hausdorff_distance from .loss_metric import LossMetric from .meandice import DiceHelper, DiceMetric, compute_dice -from .meaniou import MeanIoU, compute_iou, compute_meaniou +from .meaniou import MeanIoU, compute_iou from .metric import Cumulative, CumulativeIterationMetric, IterationMetric, Metric from .mmd import MMDMetric, compute_mmd from .panoptic_quality import PanopticQualityMetric, compute_panoptic_quality diff --git a/monai/metrics/meaniou.py b/monai/metrics/meaniou.py index 1cb22a67be..7d1ae49f25 100644 --- a/monai/metrics/meaniou.py +++ b/monai/metrics/meaniou.py @@ -14,7 +14,7 @@ import torch from monai.metrics.utils import do_metric_reduction, ignore_background -from monai.utils import MetricReduction, deprecated +from monai.utils import MetricReduction from .metric import CumulativeIterationMetric @@ -85,7 +85,7 @@ def aggregate( self, reduction: MetricReduction | str | None = None ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: """ - Execute reduction logic for the output of `compute_meaniou`. + Execute reduction logic for the output of `compute_iou`. Args: reduction: define mode of reduction to the metrics, will only apply reduction on `not-nan` values, @@ -148,8 +148,3 @@ def compute_iou( if ignore_empty: return torch.where(y_o > 0, (intersection) / union, torch.tensor(float("nan"), device=y_o.device)) return torch.where(union > 0, (intersection) / union, torch.tensor(1.0, device=y_o.device)) - - -@deprecated(since="1.0.0", msg_suffix="use `compute_iou` instead.") -def compute_meaniou(*args, **kwargs): - return compute_iou(*args, **kwargs) diff --git a/monai/metrics/rocauc.py b/monai/metrics/rocauc.py index 56d9faa9dd..9280c54342 100644 --- a/monai/metrics/rocauc.py +++ b/monai/metrics/rocauc.py @@ -175,7 +175,7 @@ def compute_roc_auc( if average == Average.NONE: return auc_values if average == Average.MACRO: - return np.mean(auc_values) + return np.mean(auc_values) # type: ignore[no-any-return] if average == Average.WEIGHTED: weights = [sum(y_) for y_ in y] return np.average(auc_values, weights=weights) # type: ignore[no-any-return] diff --git a/monai/networks/blocks/patchembedding.py b/monai/networks/blocks/patchembedding.py index f6d390692e..7d56045814 100644 --- a/monai/networks/blocks/patchembedding.py +++ b/monai/networks/blocks/patchembedding.py @@ -42,7 +42,9 @@ class PatchEmbeddingBlock(nn.Module): """ - @deprecated_arg(name="pos_embed", since="1.2", new_name="proj_type", msg_suffix="please use `proj_type` instead.") + @deprecated_arg( + name="pos_embed", since="1.2", removed="1.4", new_name="proj_type", msg_suffix="please use `proj_type` instead." 
+ ) def __init__( self, in_channels: int, diff --git a/monai/networks/nets/unet.py b/monai/networks/nets/unet.py index a48aabf915..7b16b6c923 100644 --- a/monai/networks/nets/unet.py +++ b/monai/networks/nets/unet.py @@ -96,9 +96,6 @@ class UNet(nn.Module): strides=(2, 2, 2, 2), ) - .. deprecated:: 0.6.0 - ``dimensions`` is deprecated, use ``spatial_dims`` instead. - Note: The acceptable spatial size of input data depends on the parameters of the network, to set appropriate spatial size, please check the tutorial for more details: https://github.com/Project-MONAI/tutorials/blob/master/modules/UNet_input_size_constrains.ipynb. diff --git a/monai/networks/nets/unetr.py b/monai/networks/nets/unetr.py index bfcd6e7d47..a88e5a92fd 100644 --- a/monai/networks/nets/unetr.py +++ b/monai/networks/nets/unetr.py @@ -27,7 +27,9 @@ class UNETR(nn.Module): UNETR: Transformers for 3D Medical Image Segmentation " """ - @deprecated_arg(name="pos_embed", since="1.2", new_name="proj_type", msg_suffix="please use `proj_type` instead.") + @deprecated_arg( + name="pos_embed", since="1.2", removed="1.4", new_name="proj_type", msg_suffix="please use `proj_type` instead." + ) def __init__( self, in_channels: int, diff --git a/monai/networks/nets/vit.py b/monai/networks/nets/vit.py index f033d7ff4a..4eada6aa76 100644 --- a/monai/networks/nets/vit.py +++ b/monai/networks/nets/vit.py @@ -31,7 +31,9 @@ class ViT(nn.Module): ViT supports Torchscript but only works for Pytorch after 1.8. """ - @deprecated_arg(name="pos_embed", since="1.2", new_name="proj_type", msg_suffix="please use `proj_type` instead.") + @deprecated_arg( + name="pos_embed", since="1.2", removed="1.4", new_name="proj_type", msg_suffix="please use `proj_type` instead." + ) def __init__( self, in_channels: int, diff --git a/monai/networks/nets/vitautoenc.py b/monai/networks/nets/vitautoenc.py index 59aae2d54a..d69f5df4be 100644 --- a/monai/networks/nets/vitautoenc.py +++ b/monai/networks/nets/vitautoenc.py @@ -33,7 +33,9 @@ class ViTAutoEnc(nn.Module): Modified to also give same dimension outputs as the input size of the image """ - @deprecated_arg(name="pos_embed", since="1.2", new_name="proj_type", msg_suffix="please use `proj_type` instead.") + @deprecated_arg( + name="pos_embed", since="1.2", removed="1.4", new_name="proj_type", msg_suffix="please use `proj_type` instead." 
+ ) def __init__( self, in_channels: int, diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index 51fd5c6288..1d308b0ed4 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -471,10 +471,8 @@ from .traits import LazyTrait, MultiSampleTrait, RandomizableTrait, ThreadUnsafe from .transform import LazyTransform, MapTransform, Randomizable, RandomizableTransform, Transform, apply_transform from .utility.array import ( - AddChannel, AddCoordinateChannels, AddExtremePointsChannel, - AsChannelFirst, AsChannelLast, CastToType, ClassesToIndices, @@ -497,7 +495,6 @@ RemoveRepeatedChannel, RepeatChannel, SimulateDelay, - SplitChannel, SplitDim, SqueezeDim, ToCupy, @@ -509,18 +506,12 @@ Transpose, ) from .utility.dictionary import ( - AddChanneld, - AddChannelD, - AddChannelDict, AddCoordinateChannelsd, AddCoordinateChannelsD, AddCoordinateChannelsDict, AddExtremePointsChanneld, AddExtremePointsChannelD, AddExtremePointsChannelDict, - AsChannelFirstd, - AsChannelFirstD, - AsChannelFirstDict, AsChannelLastd, AsChannelLastD, AsChannelLastDict, @@ -602,9 +593,6 @@ SimulateDelayd, SimulateDelayD, SimulateDelayDict, - SplitChanneld, - SplitChannelD, - SplitChannelDict, SplitDimd, SplitDimD, SplitDimDict, diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py index 1c84c473b5..6a3798e7ba 100644 --- a/monai/transforms/croppad/array.py +++ b/monai/transforms/croppad/array.py @@ -591,13 +591,12 @@ class RandSpatialCrop(Randomizable, Crop): lazy: a flag to indicate whether this transform should execute lazily or not. Defaults to False. """ - @deprecated_arg_default("random_size", True, False, since="1.1", replaced="1.3") def __init__( self, roi_size: Sequence[int] | int, max_roi_size: Sequence[int] | int | None = None, random_center: bool = True, - random_size: bool = True, + random_size: bool = False, lazy: bool = False, ) -> None: super().__init__(lazy) @@ -662,13 +661,12 @@ class RandScaleCrop(RandSpatialCrop): lazy: a flag to indicate whether this transform should execute lazily or not. Defaults to False. 
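With the `deprecated_arg_default` decorators dropped here, `random_size` now genuinely defaults to `False` for `RandSpatialCrop`, `RandScaleCrop` and their samples/dictionary variants. A short sketch of opting back into the old randomly-sized crop explicitly; the random input volume is only illustrative:

```python
import torch

from monai.transforms import RandSpatialCrop

img = torch.rand(1, 32, 32, 32)

# new default: the crop has exactly `roi_size`
fixed = RandSpatialCrop(roi_size=(16, 16, 16))(img)

# former default: crop size sampled between `roi_size` and `max_roi_size` (or the image size)
variable = RandSpatialCrop(roi_size=(16, 16, 16), random_size=True)(img)

print(fixed.shape, variable.shape)
```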
""" - @deprecated_arg_default("random_size", True, False, since="1.1", replaced="1.3") def __init__( self, roi_scale: Sequence[float] | float, max_roi_scale: Sequence[float] | float | None = None, random_center: bool = True, - random_size: bool = True, + random_size: bool = False, lazy: bool = False, ) -> None: super().__init__( @@ -737,14 +735,13 @@ class RandSpatialCropSamples(Randomizable, TraceableTransform, LazyTransform, Mu backend = RandSpatialCrop.backend - @deprecated_arg_default("random_size", True, False, since="1.1", replaced="1.3") def __init__( self, roi_size: Sequence[int] | int, num_samples: int, max_roi_size: Sequence[int] | int | None = None, random_center: bool = True, - random_size: bool = True, + random_size: bool = False, lazy: bool = False, ) -> None: LazyTransform.__init__(self, lazy) @@ -819,7 +816,7 @@ def threshold_at_one(x): """ - @deprecated_arg_default("allow_smaller", old_default=True, new_default=False, since="1.2", replaced="1.3") + @deprecated_arg_default("allow_smaller", old_default=True, new_default=False, since="1.2", replaced="1.5") def __init__( self, select_fn: Callable = is_positive, diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py index da3c38f0a2..56d214c51d 100644 --- a/monai/transforms/croppad/dictionary.py +++ b/monai/transforms/croppad/dictionary.py @@ -559,14 +559,13 @@ class RandSpatialCropd(RandCropd): lazy: a flag to indicate whether this transform should execute lazily or not. Defaults to False. """ - @deprecated_arg_default("random_size", True, False, since="1.1", replaced="1.3") def __init__( self, keys: KeysCollection, roi_size: Sequence[int] | int, max_roi_size: Sequence[int] | int | None = None, random_center: bool = True, - random_size: bool = True, + random_size: bool = False, allow_missing_keys: bool = False, lazy: bool = False, ) -> None: @@ -603,14 +602,13 @@ class RandScaleCropd(RandCropd): lazy: a flag to indicate whether this transform should execute lazily or not. Defaults to False. """ - @deprecated_arg_default("random_size", True, False, since="1.1", replaced="1.3") def __init__( self, keys: KeysCollection, roi_scale: Sequence[float] | float, max_roi_scale: Sequence[float] | float | None = None, random_center: bool = True, - random_size: bool = True, + random_size: bool = False, allow_missing_keys: bool = False, lazy: bool = False, ) -> None: @@ -660,7 +658,6 @@ class RandSpatialCropSamplesd(Randomizable, MapTransform, LazyTransform, MultiSa backend = RandSpatialCropSamples.backend - @deprecated_arg_default("random_size", True, False, since="1.1", replaced="1.3") def __init__( self, keys: KeysCollection, @@ -668,7 +665,7 @@ def __init__( num_samples: int, max_roi_size: Sequence[int] | int | None = None, random_center: bool = True, - random_size: bool = True, + random_size: bool = False, allow_missing_keys: bool = False, lazy: bool = False, ) -> None: @@ -722,7 +719,7 @@ class CropForegroundd(Cropd): for more information. 
""" - @deprecated_arg_default("allow_smaller", old_default=True, new_default=False, since="1.2", replaced="1.3") + @deprecated_arg_default("allow_smaller", old_default=True, new_default=False, since="1.2", replaced="1.5") def __init__( self, keys: KeysCollection, diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index bf39116783..b36c011822 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -45,14 +45,7 @@ from monai.transforms.utility.array import EnsureChannelFirst from monai.utils import GridSamplePadMode from monai.utils import ImageMetaKey as Key -from monai.utils import ( - OptionalImportError, - convert_to_dst_type, - deprecated_arg_default, - ensure_tuple, - look_up_option, - optional_import, -) +from monai.utils import OptionalImportError, convert_to_dst_type, ensure_tuple, look_up_option, optional_import nib, _ = optional_import("nibabel") Image, _ = optional_import("PIL.Image") @@ -127,11 +120,10 @@ class LoadImage(Transform): """ - @deprecated_arg_default("image_only", False, True, since="1.1", replaced="1.3") def __init__( self, reader=None, - image_only: bool = False, + image_only: bool = True, dtype: DtypeLike | None = np.float32, ensure_channel_first: bool = False, simple_keys: bool = False, @@ -380,14 +372,13 @@ class SaveImage(Transform): to where the input image has been saved. """ - @deprecated_arg_default("resample", True, False, since="1.1", replaced="1.3") def __init__( self, output_dir: PathLike = "./", output_postfix: str = "trans", output_ext: str = ".nii.gz", output_dtype: DtypeLike | None = np.float32, - resample: bool = True, + resample: bool = False, mode: str = "nearest", padding_mode: str = GridSamplePadMode.BORDER, scale: int | None = None, diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py index b343faadb0..a1578d4fe3 100644 --- a/monai/transforms/io/dictionary.py +++ b/monai/transforms/io/dictionary.py @@ -29,7 +29,6 @@ from monai.transforms.io.array import LoadImage, SaveImage from monai.transforms.transform import MapTransform, Transform from monai.utils import GridSamplePadMode, ensure_tuple, ensure_tuple_rep -from monai.utils.deprecate_utils import deprecated_arg_default from monai.utils.enums import PostFix __all__ = ["LoadImaged", "LoadImageD", "LoadImageDict", "SaveImaged", "SaveImageD", "SaveImageDict"] @@ -72,7 +71,6 @@ class LoadImaged(MapTransform): """ - @deprecated_arg_default("image_only", False, True, since="1.1", replaced="1.3") def __init__( self, keys: KeysCollection, @@ -81,7 +79,7 @@ def __init__( meta_keys: KeysCollection | None = None, meta_key_postfix: str = DEFAULT_POST_FIX, overwriting: bool = False, - image_only: bool = False, + image_only: bool = True, ensure_channel_first: bool = False, simple_keys: bool = False, prune_meta_pattern: str | None = None, @@ -259,7 +257,6 @@ class SaveImaged(MapTransform): to where the input image has been saved. 
""" - @deprecated_arg_default("resample", True, False, since="1.1", replaced="1.3") def __init__( self, keys: KeysCollection, @@ -268,7 +265,7 @@ def __init__( output_dir: Path | str = "./", output_postfix: str = "trans", output_ext: str = ".nii.gz", - resample: bool = True, + resample: bool = False, mode: str = "nearest", padding_mode: str = GridSamplePadMode.BORDER, scale: int | None = None, diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 27444b3fb0..9d55aa013b 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -72,7 +72,6 @@ issequenceiterable, optional_import, ) -from monai.utils.deprecate_utils import deprecated_arg from monai.utils.enums import GridPatchSort, PatchKeys, TraceKeys, TransformBackends from monai.utils.misc import ImageMetaKey as Key from monai.utils.module import look_up_option @@ -432,11 +431,9 @@ def lazy(self, val: bool) -> None: self._lazy = val self.sp_resample.lazy = val - @deprecated_arg(name="affine", since="0.9", msg_suffix="Not needed, input should be `MetaTensor`.") def __call__( self, data_array: torch.Tensor, - affine: NdarrayOrTensor | None = None, mode: str | int | None = None, padding_mode: str | None = None, align_corners: bool | None = None, @@ -492,9 +489,7 @@ def __call__( if sr <= 0: raise ValueError(f"data_array must have at least one spatial dimension, got {original_spatial_shape}.") affine_: np.ndarray - if affine is not None: - warnings.warn("arg `affine` is deprecated, the affine of MetaTensor in data_array has higher priority.") - input_affine = data_array.peek_pending_affine() if isinstance(data_array, MetaTensor) else affine + input_affine = data_array.peek_pending_affine() if isinstance(data_array, MetaTensor) else None if input_affine is None: warnings.warn("`data_array` is not of type MetaTensor, assuming affine to be identity.") # default to identity diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index 54fcdc8d59..ef2a714a25 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -59,7 +59,6 @@ convert_to_cupy, convert_to_numpy, convert_to_tensor, - deprecated, ensure_tuple, look_up_option, min_version, @@ -76,16 +75,13 @@ __all__ = [ "Identity", "RandIdentity", - "AsChannelFirst", "AsChannelLast", - "AddChannel", "AddCoordinateChannels", "EnsureChannelFirst", "EnsureType", "RepeatChannel", "RemoveRepeatedChannel", "SplitDim", - "SplitChannel", "CastToType", "ToTensor", "ToNumpy", @@ -233,54 +229,6 @@ def __call__(self, img: torch.Tensor, meta_dict: Mapping | None = None) -> torch return convert_to_tensor(result, track_meta=get_track_meta()) # type: ignore -@deprecated( - since="0.8", - removed="1.3", - msg_suffix="please use MetaTensor data type and monai.transforms.EnsureChannelFirst instead.", -) -class AsChannelFirst(EnsureChannelFirst): - """ - Change the channel dimension of the image to the first dimension. - Most of the image transformations in ``monai.transforms`` - assume the input image is in the channel-first format, which has the shape - (num_channels, spatial_dim_1[, spatial_dim_2, ...]). - This transform could be used to convert, for example, a channel-last image array in shape - (spatial_dim_1[, spatial_dim_2, ...], num_channels) into the channel-first format, - so that the multidimensional image array can be correctly interpreted by the other transforms. - Args: - channel_dim: which dimension of input image is the channel, default is the last dimension. 
- """ - - def __init__(self, channel_dim: int = -1) -> None: - super().__init__(channel_dim=channel_dim) - - -@deprecated( - since="0.8", - removed="1.3", - msg_suffix="please use MetaTensor data type and monai.transforms.EnsureChannelFirst instead" - " with `channel_dim='no_channel'`.", -) -class AddChannel(EnsureChannelFirst): - """ - Adds a 1-length channel dimension to the input image. - - Most of the image transformations in ``monai.transforms`` - assumes the input image is in the channel-first format, which has the shape - (num_channels, spatial_dim_1[, spatial_dim_2, ...]). - - This transform could be used, for example, to convert a (spatial_dim_1[, spatial_dim_2, ...]) - spatial image into the channel-first format so that the - multidimensional image array can be correctly interpreted by the other - transforms. - """ - - backend = [TransformBackends.TORCH, TransformBackends.NUMPY] - - def __init__(self) -> None: - super().__init__(channel_dim="no_channel") - - class RepeatChannel(Transform): """ Repeat channel data to construct expected input shape for models. @@ -381,23 +329,6 @@ def __call__(self, img: torch.Tensor) -> list[torch.Tensor]: return outputs -@deprecated(since="0.8", removed="1.3", msg_suffix="please use `SplitDim` instead.") -class SplitChannel(SplitDim): - """ - Split Numpy array or PyTorch Tensor data according to the channel dim. - It can help applying different following transforms to different channels. - - Note: `torch.split`/`np.split` is used, so the outputs are views of the input (shallow copy). - - Args: - channel_dim: which dimension of input image is the channel, default to 0. - - """ - - def __init__(self, channel_dim: int = 0) -> None: - super().__init__(channel_dim) - - class CastToType(Transform): """ Cast the Numpy data to specified numpy data type, or cast the PyTorch Tensor to @@ -1433,9 +1364,6 @@ class AddCoordinateChannels(Transform): appended to the input image. E.g., `(0, 1, 2)` represents `H, W, D` dims and append three channels to the input image, encoding the coordinates of the input's three spatial dimensions. - .. deprecated:: 0.8.0 - ``spatial_channels`` is deprecated, use ``spatial_dims`` instead. 
- """ backend = [TransformBackends.NUMPY] diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index 7f4f22a475..82eb593b2c 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -65,23 +65,17 @@ ) from monai.transforms.utils import extreme_points_to_image, get_extreme_points from monai.transforms.utils_pytorch_numpy_unification import concatenate -from monai.utils import deprecated, deprecated_arg, ensure_tuple, ensure_tuple_rep +from monai.utils import ensure_tuple, ensure_tuple_rep from monai.utils.enums import PostFix, TraceKeys, TransformBackends from monai.utils.type_conversion import convert_to_dst_type __all__ = [ - "AddChannelD", - "AddChannelDict", - "AddChanneld", "AddCoordinateChannelsD", "AddCoordinateChannelsDict", "AddCoordinateChannelsd", "AddExtremePointsChannelD", "AddExtremePointsChannelDict", "AddExtremePointsChanneld", - "AsChannelFirstD", - "AsChannelFirstDict", - "AsChannelFirstd", "AsChannelLastD", "AsChannelLastDict", "AsChannelLastd", @@ -156,9 +150,6 @@ "SimulateDelayD", "SimulateDelayDict", "SimulateDelayd", - "SplitChannelD", - "SplitChannelDict", - "SplitChanneld", "SplitDimD", "SplitDimDict", "SplitDimd", @@ -251,16 +242,8 @@ class EnsureChannelFirstd(MapTransform): backend = EnsureChannelFirst.backend - @deprecated_arg(name="meta_keys", since="0.9", msg_suffix="not needed if image is type `MetaTensor`.") - @deprecated_arg(name="meta_key_postfix", since="0.9", msg_suffix="not needed if image is type `MetaTensor`.") def __init__( - self, - keys: KeysCollection, - meta_keys: KeysCollection | None = None, - meta_key_postfix: str = DEFAULT_POST_FIX, - strict_check: bool = True, - allow_missing_keys: bool = False, - channel_dim=None, + self, keys: KeysCollection, strict_check: bool = True, allow_missing_keys: bool = False, channel_dim=None ) -> None: """ Args: @@ -275,60 +258,15 @@ def __init__( """ super().__init__(keys, allow_missing_keys) self.adjuster = EnsureChannelFirst(strict_check=strict_check, channel_dim=channel_dim) - self.meta_keys = ensure_tuple_rep(meta_keys, len(self.keys)) - self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys)) def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]: d = dict(data) - for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix): - d[key] = self.adjuster(d[key], d.get(meta_key or f"{key}_{meta_key_postfix}")) # type: ignore + for key in self.key_iterator(d): + meta_dict = d[key].meta if isinstance(d[key], MetaTensor) else None # type: ignore[attr-defined] + d[key] = self.adjuster(d[key], meta_dict) # type: ignore return d -@deprecated( - since="0.8", - removed="1.3", - msg_suffix="please use MetaTensor data type and monai.transforms.EnsureChannelFirstd instead.", -) -class AsChannelFirstd(EnsureChannelFirstd): - """ - Dictionary-based wrapper of :py:class:`monai.transforms.AsChannelFirst`. - """ - - def __init__(self, keys: KeysCollection, channel_dim: int = -1, allow_missing_keys: bool = False) -> None: - """ - Args: - keys: keys of the corresponding items to be transformed. - See also: :py:class:`monai.transforms.compose.MapTransform` - channel_dim: which dimension of input image is the channel, default is the last dimension. - allow_missing_keys: don't raise exception if key is missing. 
- """ - super().__init__(keys=keys, channel_dim=channel_dim, allow_missing_keys=allow_missing_keys) - - -@deprecated( - since="0.8", - removed="1.3", - msg_suffix="please use MetaTensor data type and monai.transforms.EnsureChannelFirstd instead" - " with `channel_dim='no_channel'`.", -) -class AddChanneld(EnsureChannelFirstd): - """ - Dictionary-based wrapper of :py:class:`monai.transforms.AddChannel`. - """ - - backend = EnsureChannelFirstd.backend - - def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None: - """ - Args: - keys: keys of the corresponding items to be transformed. - See also: :py:class:`monai.transforms.compose.MapTransform` - allow_missing_keys: don't raise exception if key is missing. - """ - super().__init__(keys, allow_missing_keys, channel_dim="no_channel") - - class RepeatChanneld(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.RepeatChannel`. @@ -445,29 +383,6 @@ def __call__( return d -@deprecated(since="0.8", removed="1.3", msg_suffix="please use `SplitDimd` instead.") -class SplitChanneld(SplitDimd): - """ - Dictionary-based wrapper of :py:class:`monai.transforms.SplitChannel`. - All the input specified by `keys` should be split into same count of data. - """ - - def __init__( - self, - keys: KeysCollection, - output_postfixes: Sequence[str] | None = None, - channel_dim: int = 0, - allow_missing_keys: bool = False, - ) -> None: - super().__init__( - keys, - output_postfixes=output_postfixes, - dim=channel_dim, - update_meta=False, # for backwards compatibility - allow_missing_keys=allow_missing_keys, - ) - - class CastToTyped(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.CastToType`. @@ -1725,9 +1640,6 @@ class AddCoordinateChannelsd(MapTransform): to the input image, encoding the coordinates of the input's three spatial dimensions. allow_missing_keys: don't raise exception if key is missing. - .. deprecated:: 0.8.0 - ``spatial_channels`` is deprecated, use ``spatial_dims`` instead. 
- """ backend = AddCoordinateChannels.backend @@ -1828,13 +1740,10 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, N RandImageFilterD = RandImageFilterDict = RandImageFilterd ImageFilterD = ImageFilterDict = ImageFilterd IdentityD = IdentityDict = Identityd -AsChannelFirstD = AsChannelFirstDict = AsChannelFirstd AsChannelLastD = AsChannelLastDict = AsChannelLastd -AddChannelD = AddChannelDict = AddChanneld EnsureChannelFirstD = EnsureChannelFirstDict = EnsureChannelFirstd RemoveRepeatedChannelD = RemoveRepeatedChannelDict = RemoveRepeatedChanneld RepeatChannelD = RepeatChannelDict = RepeatChanneld -SplitChannelD = SplitChannelDict = SplitChanneld SplitDimD = SplitDimDict = SplitDimd CastToTypeD = CastToTypeDict = CastToTyped ToTensorD = ToTensorDict = ToTensord diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py index b769b8da2f..258108904c 100644 --- a/monai/transforms/utils.py +++ b/monai/transforms/utils.py @@ -946,7 +946,7 @@ def _create_translate( return array_func(affine) # type: ignore -@deprecated_arg_default("allow_smaller", old_default=True, new_default=False, since="1.2", replaced="1.3") +@deprecated_arg_default("allow_smaller", old_default=True, new_default=False, since="1.2", replaced="1.5") def generate_spatial_bounding_box( img: NdarrayOrTensor, select_fn: Callable = is_positive, diff --git a/monai/utils/enums.py b/monai/utils/enums.py index 572cd9293d..409f979c56 100644 --- a/monai/utils/enums.py +++ b/monai/utils/enums.py @@ -603,7 +603,7 @@ class LabelStatsKeys(StrEnum): LABEL_NCOMP = "ncomponents" -@deprecated(since="1.2", msg_suffix="please use `AlgoKeys` instead.") +@deprecated(since="1.2", removed="1.4", msg_suffix="please use `AlgoKeys` instead.") class AlgoEnsembleKeys(StrEnum): """ Default keys for Mixed Ensemble diff --git a/tests/min_tests.py b/tests/min_tests.py index 4c4e374311..879cdc61b4 100644 --- a/tests/min_tests.py +++ b/tests/min_tests.py @@ -145,7 +145,6 @@ def run_testsuit(): "test_occlusion_sensitivity", "test_orientation", "test_orientationd", - "test_parallel_execution", "test_patchembedding", "test_persistentdataset", "test_pil_reader", @@ -193,7 +192,6 @@ def run_testsuit(): "test_zoom_affine", "test_zoomd", "test_prepare_batch_default_dist", - "test_parallel_execution_dist", "test_bundle_verify_metadata", "test_bundle_verify_net", "test_bundle_ckpt_export", diff --git a/tests/test_add_channeld.py b/tests/test_add_channeld.py deleted file mode 100644 index 0c0a1ffa49..0000000000 --- a/tests/test_add_channeld.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import annotations - -import unittest - -import numpy as np -from parameterized import parameterized - -from monai.transforms import AddChanneld -from tests.utils import TEST_NDARRAYS - -TESTS = [] -for p in TEST_NDARRAYS: - TESTS.append( - [ - {"keys": ["img", "seg"]}, - {"img": p(np.array([[0, 1], [1, 2]])), "seg": p(np.array([[0, 1], [1, 2]]))}, - (1, 2, 2), - ] - ) - - -class TestAddChanneld(unittest.TestCase): - @parameterized.expand(TESTS) - def test_shape(self, input_param, input_data, expected_shape): - result = AddChanneld(**input_param)(input_data) - self.assertEqual(result["img"].shape, expected_shape) - self.assertEqual(result["seg"].shape, expected_shape) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_arraydataset.py b/tests/test_arraydataset.py index 6f373cd9db..a3b78fc6e0 100644 --- a/tests/test_arraydataset.py +++ b/tests/test_arraydataset.py @@ -44,7 +44,7 @@ def __call__(self, input_, lazy): img = self.transforms[0](input_) metadata = img.meta img = self.transforms[1](img) - img = self.transforms[2](img, metadata["affine"], lazy=lazy) + img = self.transforms[2](img, lazy=lazy) metadata = img.meta return self.transforms[3](img), metadata diff --git a/tests/test_as_channel_first.py b/tests/test_as_channel_first.py deleted file mode 100644 index 3bf4e877ab..0000000000 --- a/tests/test_as_channel_first.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import unittest - -import numpy as np -from parameterized import parameterized - -from monai.transforms import AsChannelFirst -from monai.transforms.utils_pytorch_numpy_unification import moveaxis -from tests.utils import TEST_NDARRAYS, assert_allclose - -TESTS = [] -for p in TEST_NDARRAYS: - TESTS.append([p, {"channel_dim": -1}, (4, 1, 2, 3)]) - TESTS.append([p, {"channel_dim": 3}, (4, 1, 2, 3)]) - TESTS.append([p, {"channel_dim": 2}, (3, 1, 2, 4)]) - - -class TestAsChannelFirst(unittest.TestCase): - @parameterized.expand(TESTS) - def test_value(self, in_type, input_param, expected_shape): - test_data = in_type(np.random.randint(0, 2, size=[1, 2, 3, 4])) - result = AsChannelFirst(**input_param)(test_data) - self.assertTupleEqual(result.shape, expected_shape) - expected = moveaxis(test_data, input_param["channel_dim"], 0) - assert_allclose(result, expected, type_test="tensor") - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_as_channel_firstd.py b/tests/test_as_channel_firstd.py deleted file mode 100644 index bd50a96f5d..0000000000 --- a/tests/test_as_channel_firstd.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import unittest - -import numpy as np -from parameterized import parameterized - -from monai.transforms import AsChannelFirstd -from tests.utils import TEST_NDARRAYS - -TESTS = [] -for p in TEST_NDARRAYS: - TESTS.append([p, {"keys": ["image", "label", "extra"], "channel_dim": -1}, (4, 1, 2, 3)]) - TESTS.append([p, {"keys": ["image", "label", "extra"], "channel_dim": 3}, (4, 1, 2, 3)]) - TESTS.append([p, {"keys": ["image", "label", "extra"], "channel_dim": 2}, (3, 1, 2, 4)]) - - -class TestAsChannelFirstd(unittest.TestCase): - @parameterized.expand(TESTS) - def test_shape(self, in_type, input_param, expected_shape): - test_data = { - "image": in_type(np.random.randint(0, 2, size=[1, 2, 3, 4])), - "label": in_type(np.random.randint(0, 2, size=[1, 2, 3, 4])), - "extra": in_type(np.random.randint(0, 2, size=[1, 2, 3, 4])), - } - result = AsChannelFirstd(**input_param)(test_data) - self.assertTupleEqual(result["image"].shape, expected_shape) - self.assertTupleEqual(result["label"].shape, expected_shape) - self.assertTupleEqual(result["extra"].shape, expected_shape) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_compute_meaniou.py b/tests/test_compute_meaniou.py index be6f1a039f..d39edaa6f3 100644 --- a/tests/test_compute_meaniou.py +++ b/tests/test_compute_meaniou.py @@ -17,7 +17,7 @@ import torch from parameterized import parameterized -from monai.metrics import MeanIoU, compute_iou, compute_meaniou +from monai.metrics import MeanIoU, compute_iou _device = "cuda:0" if torch.cuda.is_available() else "cpu" # keep background @@ -189,7 +189,7 @@ class TestComputeMeanIoU(unittest.TestCase): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_9, TEST_CASE_11, TEST_CASE_12]) def test_value(self, input_data, expected_value): - result = compute_meaniou(**input_data) + result = compute_iou(**input_data) np.testing.assert_allclose(result.cpu().numpy(), expected_value, atol=1e-4) np.testing.assert_equal(result.device, input_data["y_pred"].device) @@ -201,7 +201,7 @@ def test_nans(self, input_data, expected_value): # MeanIoU class tests @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_10]) def test_value_class(self, input_data, expected_value): - # same test as for compute_meaniou + # same test as for compute_iou vals = {} vals["y_pred"] = input_data.pop("y_pred") vals["y"] = input_data.pop("y") diff --git a/tests/test_dataset_summary.py b/tests/test_dataset_summary.py index b1cc578f32..87538425d5 100644 --- a/tests/test_dataset_summary.py +++ b/tests/test_dataset_summary.py @@ -54,7 +54,7 @@ def test_spacing_intensity(self): t = Compose( [ - LoadImaged(keys=["image", "label"]), + LoadImaged(keys=["image", "label"], image_only=False), ToNumpyd(keys=["image", "label", "image_meta_dict", "label_meta_dict"]), ] ) diff --git a/tests/test_handler_tb_stats.py b/tests/test_handler_tb_stats.py index f4ccfc9780..883827a1ac 100644 --- a/tests/test_handler_tb_stats.py +++ b/tests/test_handler_tb_stats.py @@ -36,14 +36,6 @@ def event_filter(_, event): @unittest.skipUnless(has_tb, "Requires SummaryWriter installation") class 
diff --git a/tests/test_dataset_summary.py b/tests/test_dataset_summary.py
index b1cc578f32..87538425d5 100644
--- a/tests/test_dataset_summary.py
+++ b/tests/test_dataset_summary.py
@@ -54,7 +54,7 @@ def test_spacing_intensity(self):
 
         t = Compose(
             [
-                LoadImaged(keys=["image", "label"]),
+                LoadImaged(keys=["image", "label"], image_only=False),
                 ToNumpyd(keys=["image", "label", "image_meta_dict", "label_meta_dict"]),
             ]
         )
diff --git a/tests/test_handler_tb_stats.py b/tests/test_handler_tb_stats.py
index f4ccfc9780..883827a1ac 100644
--- a/tests/test_handler_tb_stats.py
+++ b/tests/test_handler_tb_stats.py
@@ -36,14 +36,6 @@ def event_filter(_, event):
 
 @unittest.skipUnless(has_tb, "Requires SummaryWriter installation")
 class TestHandlerTBStats(unittest.TestCase):
-    def test_args_validation(self):
-        with self.assertWarns(FutureWarning):
-            with self.assertRaisesRegex(ValueError, expected_regex="iteration_interval should be 1"):
-                TensorBoardStatsHandler(log_dir=".", iteration_log=get_event_filter([1, 2]), iteration_interval=2)
-
-            with self.assertRaisesRegex(ValueError, expected_regex="epoch_interval should be 1"):
-                TensorBoardStatsHandler(log_dir=".", epoch_log=get_event_filter([1, 2]), epoch_interval=2)
-
     def test_metrics_print(self):
         with tempfile.TemporaryDirectory() as tempdir:
             # set up engine
diff --git a/tests/test_image_dataset.py b/tests/test_image_dataset.py
index 4df84d60d1..7f7bdec513 100644
--- a/tests/test_image_dataset.py
+++ b/tests/test_image_dataset.py
@@ -49,7 +49,7 @@ def __call__(self, data):
 class _TestCompose(Compose):
     def __call__(self, data, meta, lazy):
         data = self.transforms[0](data)  # ensure channel first
-        data = self.transforms[1](data, data.meta["affine"], lazy=lazy)  # spacing
+        data = self.transforms[1](data, lazy=lazy)  # spacing
         meta = data.meta
         if len(self.transforms) == 3:
             return self.transforms[2](data), meta  # image contrast
diff --git a/tests/test_integration_bundle_run.py b/tests/test_integration_bundle_run.py
index e6f3d6b8c6..fb9e0e75c6 100644
--- a/tests/test_integration_bundle_run.py
+++ b/tests/test_integration_bundle_run.py
@@ -67,7 +67,7 @@ def test_tiny(self):
             json.dump(
                 {
                     "trainer": {"_target_": "tests.test_integration_bundle_run._Runnable42", "val": 42},
-                    # keep this test case to cover the "runner_id" arg
+                    # keep this test case to cover the "run_id" arg
                     "training": "$@trainer.run()",
                 },
                 f,
@@ -106,7 +106,7 @@ def test_scripts_fold(self):
                         "_target_": "tests.test_integration_bundle_run._Runnable43",
                         "func": "$scripts.tiny_test",
                     },
-                    # keep this test case to cover the "runner_id" arg
+                    # keep this test case to cover the "run_id" arg
                     "training": "$@trainer.run()",
                 },
                 f,
diff --git a/tests/test_nifti_rw.py b/tests/test_nifti_rw.py
index cac18cf9e3..f45c2ac5a7 100644
--- a/tests/test_nifti_rw.py
+++ b/tests/test_nifti_rw.py
@@ -110,8 +110,7 @@ def test_consistency(self):
         np.set_printoptions(suppress=True, precision=3)
         test_image = make_nifti_image(np.arange(64).reshape(1, 8, 8), np.diag([1.5, 1.5, 1.5, 1]))
         data = LoadImage(image_only=True, reader="NibabelReader", as_closest_canonical=False)(test_image)
-        header = data.meta
-        data = Spacing([0.8, 0.8, 0.8])(data[None], header["affine"], mode="nearest")
+        data = Spacing([0.8, 0.8, 0.8])(data[None], mode="nearest")
         original_affine = data.meta["original_affine"]
         data = Orientation("ILP")(data)
         new_affine = data.affine
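The test updates above drop the explicit affine argument when calling Spacing; the assumption is that LoadImage returns a MetaTensor whose .affine is read directly by the resampler, so no separate metadata lookup is needed. A minimal sketch (the file path is a placeholder):

from monai.transforms import LoadImage, Spacing

img = LoadImage(image_only=True, reader="NibabelReader")("/path/to/volume.nii.gz")  # placeholder path
img = img[None]  # add a channel dimension; the MetaTensor keeps its metadata
resampled = Spacing(pixdim=(1.0, 1.0, 1.0), mode="bilinear")(img)
print(img.affine, resampled.affine)  # the affine is read from and written back to the MetaTensor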
-
-from __future__ import annotations
-
-import unittest
-import warnings
-
-import torch
-
-from monai.engines import create_multigpu_supervised_trainer
-from tests.utils import skip_if_no_cuda
-
-
-def fake_loss(y_pred, y):
-    return (y_pred[0] + y).sum()
-
-
-def fake_data_stream():
-    while True:
-        yield torch.rand((10, 1, 64, 64)), torch.rand((10, 1, 64, 64))
-
-
-class TestParallelExecution(unittest.TestCase):
-    """
-    Tests single GPU, multi GPU, and CPU execution with the Ignite supervised trainer.
-    """
-
-    @skip_if_no_cuda
-    def test_single_gpu(self):
-        device = torch.device("cuda:0")
-        net = torch.nn.Conv2d(1, 1, 3, padding=1).to(device)
-        opt = torch.optim.Adam(net.parameters(), 1e-3)
-        trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, [device])
-        trainer.run(fake_data_stream(), 2, 2)
-
-    @skip_if_no_cuda
-    def test_multi_gpu(self):
-        device = torch.device("cuda")
-        net = torch.nn.Conv2d(1, 1, 3, padding=1).to(device)
-        opt = torch.optim.Adam(net.parameters(), 1e-3)
-
-        with warnings.catch_warnings():
-            warnings.simplefilter("ignore")  # ignore warnings about imbalanced GPU memory
-
-            trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, None)
-
-        trainer.run(fake_data_stream(), 2, 2)
-
-    def test_cpu(self):
-        net = torch.nn.Conv2d(1, 1, 3, padding=1)
-        opt = torch.optim.Adam(net.parameters(), 1e-3)
-        trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, [])
-        trainer.run(fake_data_stream(), 2, 2)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/tests/test_parallel_execution_dist.py b/tests/test_parallel_execution_dist.py
deleted file mode 100644
index b6f6695be4..0000000000
--- a/tests/test_parallel_execution_dist.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (c) MONAI Consortium
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#     http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import annotations
-
-import unittest
-
-import torch
-import torch.distributed as dist
-
-from monai.engines import create_multigpu_supervised_trainer
-from tests.utils import DistCall, DistTestCase, skip_if_no_cuda
-
-
-def fake_loss(y_pred, y):
-    return (y_pred[0] + y).sum()
-
-
-def fake_data_stream():
-    while True:
-        yield torch.rand((10, 1, 64, 64)), torch.rand((10, 1, 64, 64))
-
-
-class DistributedTestParallelExecution(DistTestCase):
-    @DistCall(nnodes=1, nproc_per_node=2)
-    @skip_if_no_cuda
-    def test_distributed(self):
-        device = torch.device(f"cuda:{dist.get_rank()}")
-        net = torch.nn.Conv2d(1, 1, 3, padding=1).to(device)
-        opt = torch.optim.Adam(net.parameters(), 1e-3)
-
-        trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, [device], distributed=True)
-        trainer.run(fake_data_stream(), 2, 2)
-        # assert the trainer output is loss value
-        self.assertTrue(isinstance(trainer.state.output, float))
-
-
-if __name__ == "__main__":
-    unittest.main()
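The two deleted test modules above exercised create_multigpu_supervised_trainer, which is removed together with the multi-GPU engine docs. One way to get comparable behaviour, sketched here under the assumption that the regular SupervisedTrainer workflow combined with torch.nn.DataParallel (or DistributedDataParallel) is the intended path; the tiny network and fake data mirror the deleted tests and are purely illustrative:

import torch
from monai.engines import SupervisedTrainer

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# wrap the network for data parallelism; on a CPU-only machine DataParallel just calls the module
net = torch.nn.DataParallel(torch.nn.Conv2d(1, 1, 3, padding=1)).to(device)
opt = torch.optim.Adam(net.parameters(), 1e-3)

def fake_data():
    # two batches of image/label pairs in the dict format expected by the default prepare_batch
    for _ in range(2):
        yield {"image": torch.rand(10, 1, 64, 64), "label": torch.rand(10, 1, 64, 64)}

trainer = SupervisedTrainer(
    device=device,
    max_epochs=2,
    train_data_loader=list(fake_data()),
    network=net,
    optimizer=opt,
    loss_function=torch.nn.MSELoss(),
)
trainer.run()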
diff --git a/tests/test_rand_spatial_crop_samplesd.py b/tests/test_rand_spatial_crop_samplesd.py
index ec0d63cc50..b37dacd643 100644
--- a/tests/test_rand_spatial_crop_samplesd.py
+++ b/tests/test_rand_spatial_crop_samplesd.py
@@ -22,7 +22,7 @@
 from tests.utils import TEST_NDARRAYS_ALL, assert_allclose
 
 TEST_CASE_1 = [
-    {"keys": ["img", "seg"], "num_samples": 4, "roi_size": [2, 2, 2], "random_center": True},
+    {"keys": ["img", "seg"], "num_samples": 4, "roi_size": [2, 2, 2], "random_center": True, "random_size": True},
     {"img": np.arange(81).reshape(3, 3, 3, 3), "seg": np.arange(81, 0, -1).reshape(3, 3, 3, 3)},
     [(3, 2, 2, 2), (3, 2, 3, 3), (3, 2, 3, 2), (3, 2, 3, 2)],
     {
@@ -47,7 +47,13 @@
 for p in TEST_NDARRAYS_ALL:
     TEST_CASE_2.append(
         [
-            {"keys": ["img", "seg"], "num_samples": 8, "roi_size": [2, 2, 3], "random_center": False},
+            {
+                "keys": ["img", "seg"],
+                "num_samples": 8,
+                "roi_size": [2, 2, 3],
+                "random_center": False,
+                "random_size": True,
+            },
             {"img": p(np.arange(81).reshape(3, 3, 3, 3)), "seg": p(np.arange(81, 0, -1).reshape(3, 3, 3, 3))},
             [
                 (3, 2, 2, 3),
diff --git a/tests/test_spacing.py b/tests/test_spacing.py
index 0c3f67713d..1ff1518297 100644
--- a/tests/test_spacing.py
+++ b/tests/test_spacing.py
@@ -283,7 +283,7 @@ def test_spacing(
         tr = Spacing(**init_param)
         call_param = data_param.copy()
         call_param["data_array"] = img
-        res: MetaTensor = tr(**call_param)
+        res: MetaTensor = tr(**call_param)  # type: ignore
         self.assertEqual(img.device, res.device)
         test_resampler_lazy(tr, res, init_param=init_param, call_param=call_param)
 
@@ -306,7 +306,7 @@ def test_spacing_torch(self, pixdim, img, track_meta: bool):
 
         if track_meta:
             self.assertIsInstance(res, MetaTensor)
-            new_spacing = affine_to_spacing(res.affine, 3)
+            new_spacing = affine_to_spacing(res.affine, 3)  # type: ignore
             assert_allclose(new_spacing, pixdim, type_test=False)
             self.assertNotEqual(img.shape, res.shape)
             test_resampler_lazy(tr, res, init_param=init_param, call_param=call_param)
diff --git a/tests/test_split_channel.py b/tests/test_split_channel.py
deleted file mode 100644
index 12b336431d..0000000000
--- a/tests/test_split_channel.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (c) MONAI Consortium
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#     http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import annotations
-
-import unittest
-
-import numpy as np
-from parameterized import parameterized
-
-from monai.transforms import SplitChannel
-from tests.utils import TEST_NDARRAYS
-
-TESTS = []
-for p in TEST_NDARRAYS:
-    TESTS.append([{"channel_dim": 1}, p(np.random.randint(2, size=(4, 3, 3, 4))), (4, 1, 3, 4)])
-    TESTS.append([{"channel_dim": 0}, p(np.random.randint(2, size=(3, 3, 4))), (1, 3, 4)])
-    TESTS.append([{"channel_dim": 2}, p(np.random.randint(2, size=(3, 2, 4))), (3, 2, 1)])
-    TESTS.append([{"channel_dim": -1}, p(np.random.randint(2, size=(3, 2, 4))), (3, 2, 1)])
-
-
-class TestSplitChannel(unittest.TestCase):
-    @parameterized.expand(TESTS)
-    def test_shape(self, input_param, test_data, expected_shape):
-        result = SplitChannel(**input_param)(test_data)
-        for data in result:
-            self.assertEqual(type(data), type(test_data))
-            self.assertTupleEqual(data.shape, expected_shape)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/tests/test_split_channeld.py b/tests/test_split_channeld.py
deleted file mode 100644
index 63b4caf7a5..0000000000
--- a/tests/test_split_channeld.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright (c) MONAI Consortium
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#     http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import annotations
-
-import unittest
-
-import numpy as np
-from parameterized import parameterized
-
-from monai.transforms import SplitChanneld
-from tests.utils import TEST_NDARRAYS
-
-TESTS = []
-for p in TEST_NDARRAYS:
-    TESTS.append(
-        [
-            {"keys": "pred", "output_postfixes": ["cls1", "cls2", "cls3"], "channel_dim": 1},
-            {"pred": p(np.random.randint(2, size=(4, 3, 3, 4)))},
-            (4, 1, 3, 4),
-        ]
-    )
-
-    TESTS.append(
-        [
-            {"keys": "pred", "output_postfixes": ["cls1", "cls2", "cls3"], "channel_dim": 0},
-            {"pred": p(np.random.randint(2, size=(3, 3, 4)))},
-            (1, 3, 4),
-        ]
-    )
-
-    TESTS.append(
-        [
-            {"keys": "pred", "output_postfixes": ["cls1", "cls2", "cls3", "cls4"], "channel_dim": 2},
-            {"pred": p(np.random.randint(2, size=(3, 2, 4)))},
-            (3, 2, 1),
-        ]
-    )
-
-    TESTS.append(
-        [
-            {"keys": "pred", "output_postfixes": ["cls1", "cls2", "cls3", "cls4"], "channel_dim": -1},
-            {"pred": p(np.random.randint(2, size=(3, 2, 4)))},
-            (3, 2, 1),
-        ]
-    )
-
-    TESTS.append([{"keys": "pred", "channel_dim": 1}, {"pred": p(np.random.randint(2, size=(3, 2, 4)))}, (3, 1, 4)])
-
-
-class TestSplitChanneld(unittest.TestCase):
-    @parameterized.expand(TESTS)
-    def test_shape(self, input_param, test_data, expected_shape):
-        result = SplitChanneld(**input_param)(test_data)
-        for k, v in result.items():
-            if "_" in k:
-                self.assertTupleEqual(v.shape, expected_shape)
-
-
-if __name__ == "__main__":
-    unittest.main()
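The SplitChannel/SplitChanneld tests deleted above rely on transforms that are assumed to be replaced by SplitDim and SplitDimd; a small sketch mirroring the deleted cases (keys, postfixes, and shapes are illustrative):

import numpy as np
from monai.transforms import SplitDim, SplitDimd

x = np.random.randint(2, size=(4, 3, 3, 4))
parts = SplitDim(dim=1, keepdim=True)(x)          # was: SplitChannel(channel_dim=1)
print([p.shape for p in parts])                   # 3 outputs of shape (4, 1, 3, 4)

data = {"pred": np.random.randint(2, size=(3, 3, 4))}
out = SplitDimd(keys="pred", dim=0, output_postfixes=["cls1", "cls2", "cls3"])(data)
print(out["pred_cls1"].shape)                     # (1, 3, 4), was: SplitChanneld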
diff --git a/tests/testing_data/transform_metatensor_cases.yaml b/tests/testing_data/transform_metatensor_cases.yaml
index a639a13309..1c79d4e4ce 100644
--- a/tests/testing_data/transform_metatensor_cases.yaml
+++ b/tests/testing_data/transform_metatensor_cases.yaml
@@ -25,9 +25,11 @@ TEST_CASE_1:
   - _target_: RandSpatialCropD
     keys: "@input_keys"
     roi_size: [76, 87, 73]
+    random_size: True
   - _target_: RandScaleCropD
     keys: "@input_keys"
     roi_scale: 0.9
+    random_size: True
  - _target_: ResizeWithPadOrCropD
     keys: "@input_keys"
     spatial_size: [32, 43, 54]
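The random_size: True entries added to the crop tests and to this YAML config presumably pin the older variable-sized crop behaviour now that it has to be requested explicitly; a short sketch of the flag's effect (shapes are illustrative):

import numpy as np
from monai.transforms import RandSpatialCrop

img = np.random.rand(1, 32, 32, 32)
cropper = RandSpatialCrop(roi_size=(16, 16, 16), random_center=True, random_size=True)
for _ in range(3):
    # each call re-randomizes; spatial sizes vary between roi_size and the input size
    print(cropper(img).shape)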