From c60bdc98da2c6234aa345a70d13346f78278c150 Mon Sep 17 00:00:00 2001 From: wdika Date: Tue, 30 Nov 2021 01:34:34 +0100 Subject: [PATCH] Updates for release --- .codecov.yml | 31 ++++++++ .deepsource.toml | 6 +- .github/workflows/codecov.yml | 37 ++++++++++ .github/workflows/codeql-analysis.yml | 50 ++++++------- .github/workflows/main.yml | 36 --------- README.md | 76 +++++++++++++++++-- SECURITY.md | 9 +-- docs/Makefile | 20 +++++ docs/make.bat | 35 +++++++++ docs/source/conf.py | 65 ++++++++++++++++ docs/source/index.rst | 18 +++++ docs/source/modules.rst | 7 ++ docs/source/mridc.data.rst | 45 ++++++++++++ docs/source/mridc.nn.rim.rst | 45 ++++++++++++ docs/source/mridc.nn.rst | 45 ++++++++++++ docs/source/mridc.rst | 54 ++++++++++++++ mridc/data/mri_data.py | 10 +-- mridc/data/transforms.py | 5 +- mridc/evaluate.py | 2 +- mridc/fft.py | 1 + mridc/losses.py | 1 + mridc/nn/__init__.py | 2 +- mridc/nn/cirim.py | 2 +- mridc/nn/e2evn.py | 1 + mridc/nn/unet.py | 1 + mridc/utils.py | 1 + requirements.txt | 1 + scripts/README.md | 102 ++++++++++++++++++++++++++ scripts/run_pics.py | 2 - scripts/run_zf.py | 1 - scripts/train_cirim.py | 2 +- scripts/train_unet.py | 2 +- setup.py | 12 +++ tests/fastmri/conftest.py | 1 + tests/fastmri/create_temp_data.py | 1 + tests/fastmri/test_mri_data.py | 1 + tests/test_fftc.py | 1 + tests/test_models.py | 5 +- tests/test_subsample.py | 3 +- tests/test_transforms.py | 2 +- tools/README.md | 28 +++++++ tools/estimate_csm.py | 6 +- 42 files changed, 676 insertions(+), 99 deletions(-) create mode 100644 .codecov.yml create mode 100644 .github/workflows/codecov.yml delete mode 100644 .github/workflows/main.yml create mode 100644 docs/Makefile create mode 100644 docs/make.bat create mode 100644 docs/source/conf.py create mode 100644 docs/source/index.rst create mode 100644 docs/source/modules.rst create mode 100644 docs/source/mridc.data.rst create mode 100644 docs/source/mridc.nn.rim.rst create mode 100644 docs/source/mridc.nn.rst create mode 100644 docs/source/mridc.rst create mode 100644 scripts/README.md create mode 100644 setup.py create mode 100644 tools/README.md diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 00000000..75de9f93 --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,31 @@ +# For more configuration details: +# https://docs.codecov.io/docs/codecov-yaml + +# Check if this file is valid by running in bash: +# curl -X POST --data-binary @.codecov.yml https://codecov.io/validate + +# Coverage configuration +# ---------------------- +coverage: + status: + patch: false + + range: 70..90 # First number represents red, and second represents green + # (default is 70..100) + round: down # up, down, or nearest + precision: 2 # Number of decimal places, between 0 and 5 + +# Ignoring Paths +# -------------- +# which folders/files to ignore +ignore: + - scripts/* + - tools/* + - setup.py + +# Pull request comments: +# ---------------------- +# Diff is the Coverage Diff of the pull request. 
+# Files are the files impacted by the pull request +comment: + layout: diff, files # accepted in any order: reach, diff, flags, and/or files diff --git a/.deepsource.toml b/.deepsource.toml index 5d67892e..7f0d9705 100644 --- a/.deepsource.toml +++ b/.deepsource.toml @@ -14,9 +14,9 @@ enabled = true name = "python" enabled = true - [analyzers.meta] - runtime_version = "3.x.x" - max_line_length = 119 +[analyzers.meta] +runtime_version = "3.x.x" +max_line_length = 119 [[transformers]] name = "black" diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml new file mode 100644 index 00000000..8434f0cf --- /dev/null +++ b/.github/workflows/codecov.yml @@ -0,0 +1,37 @@ +name: Upload report on Codecov + +on: [ push ] + +jobs: + run: + runs-on: ${{ matrix.os }} + + strategy: + matrix: + os: [ ubuntu-latest ] + + env: + OS: ${{ matrix.os }} + PYTHON: '3.9' + + steps: + - uses: actions/checkout@master + + - name: Setup Python + uses: actions/setup-python@master + with: + python-version: 3.9 + + - name: Generate coverage report + run: | + pip install -r requirements.txt + pip install pytest-cov + pytest --cov=./ --cov-report=xml + + - name: "Upload coverage to Codecov" + uses: codecov/codecov-action@v2 + with: + fail_ci_if_error: true + flags: pytests # optional + name: codecov-umbrella # optional + verbose: true # optional (default = false) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index ed613c4d..3bfbe557 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -37,34 +37,34 @@ jobs: # Learn more about CodeQL language support at https://git.io/codeql-language-support steps: - - name: Checkout repository - uses: actions/checkout@v2 + - name: Checkout repository + uses: actions/checkout@v2 - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v1 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - # queries: ./path/to/local/query, your-org/your-repo/queries@main + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v1 + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v1 - # ℹ️ Command-line programs to run using the OS shell. - # 📚 https://git.io/JvXDl + # ℹ️ Command-line programs to run using the OS shell. 
+ # 📚 https://git.io/JvXDl

- # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
- # and modify them (or add more) to build your code if your project
- # uses a compiled language

+ # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+ # and modify them (or add more) to build your code if your project
+ # uses a compiled language

- #- run: |
- # make bootstrap
- # make release

+ #- run: |
+ # make bootstrap
+ # make release

- - name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v1
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v1
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
deleted file mode 100644
index 30a4bc10..00000000
--- a/.github/workflows/main.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: CI
-
-# Controls when the workflow will run
-on:
-  # Triggers the workflow on push or pull request events but only for the main branch
-  push:
-    branches: [ main ]
-  pull_request:
-    branches: [ main ]
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-jobs:
-  # This workflow contains a single job called "build"
-  build:
-    # The type of runner that the job will run on
-    runs-on: ubuntu-latest
-
-    # Steps represent a sequence of tasks that will be executed as part of the job
-    steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@v2
-
-      # Runs a single command using the runners shell
-      - name: Run a one-line script
-        run: echo Hello, world!
-
-      # Runs a set of commands using the runners shell
-      - name: Run a multi-line script
-        run: |
-          echo Add other actions to build,
-          echo test, and deploy your project.
diff --git a/README.md b/README.md
index 6cf0b60f..a03740f0 100644
--- a/README.md
+++ b/README.md
@@ -1,20 +1,80 @@
-# Magnetic Resonance Imaging Data Consistency
+# Data Consistency for Magnetic Resonance Imaging
+[![Build Status](https://app.travis-ci.com/wdika/mridc.svg?branch=main)](https://app.travis-ci.com/wdika/mridc)
+[![CircleCI](https://circleci.com/gh/wdika/mridc/tree/main.svg?style=svg)](https://circleci.com/gh/wdika/mridc/tree/main)
+[![codecov](https://codecov.io/gh/wdika/mridc/branch/main/graph/badge.svg?token=KPPQ33DOTF)](https://codecov.io/gh/wdika/mridc)
+[![DeepSource](https://deepsource.io/gh/wdika/mridc.svg/?label=active+issues&show_trend=true&token=txj87v43GA6vhpbSwPEUTQtX)](https://deepsource.io/gh/wdika/mridc/?ref=repository-badge)
+[![DeepSource](https://deepsource.io/gh/wdika/mridc.svg/?label=resolved+issues&show_trend=true&token=txj87v43GA6vhpbSwPEUTQtX)](https://deepsource.io/gh/wdika/mridc/?ref=repository-badge)
+---
+
+**Data Consistency (DC) is crucial for generalization in multi-modal MRI data and robustness in detecting pathology.**
+
+This repo implements the following reconstruction methods:
+
+- Cascades of Independently Recurrent Inference Machines (CIRIM) [1],
+- Independently Recurrent Inference Machines (IRIM) [2, 3],
+- End-to-End Variational Network (E2EVN) [4, 5],
+- the UNet [5, 6],
+- Compressed Sensing (CS) [7], and
+- zero-filled reconstruction (ZF).
+
+The CIRIM, the RIM, and the E2EVN perform unrolled optimization by gradient descent, so DC is enforced implicitly.
+Through cascades, DC can also be enforced explicitly by a designed term [1, 4].
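+
+For intuition, below is a minimal sketch of a single data-consistency gradient step in image space. It is
+illustrative only: the `fft`/`ifft` operators and all names here are assumptions, not the exact update used by the
+models in this repo.
+
+```python
+def dc_gradient_step(eta, masked_kspace, mask, fft, ifft, lr=0.1):
+    """One gradient step on the k-space data-consistency term ||M F eta - y||^2."""
+    residual = mask * fft(eta) - masked_kspace  # mismatch at the sampled k-space positions only
+    return eta - lr * ifft(residual)  # steepest-descent update, mapped back to image space
+```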
+
+## Usage
+
+See [scripts](scripts) for examples of how to train models and run a method for reconstruction.
+
+See [tools](tools) for preprocessing and evaluation utilities.
+
+Recommended public datasets to use with this repo:
+
+- [fastMRI](https://fastmri.org/) [5].
+
+## Documentation
+
+[![Documentation Status](https://readthedocs.org/projects/mridc/badge/?version=latest)](https://mridc.readthedocs.io/en/latest/?badge=latest)
+
+Read the docs [here](https://mridc.readthedocs.io/en/latest/index.html).

 ## License

-This repo is released under the [Apache 2.0 License](LICENSE).
+[![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)

 ## Citation

-Check CITATION.cff file or cite this repository widget. Alternatively cite as
+Check the CITATION.cff file or cite using the repository widget. Alternatively, cite as

 ```BibTeX
-@misc{MRIDC,
-    author = {Karkalousos, Dimitrios and Caan, Matthan},
-    title = {MRI Data Consistency},
-    howpublished = {\url{https://github.com/wdika/mridc}},
-    year = {2021}
+@misc{mridc,
+    author={Karkalousos, Dimitrios and Caan, Matthan},
+    title={MRIDC: Data Consistency for Magnetic Resonance Imaging},
+    year={2021},
+    url = {https://github.com/wdika/mridc},
 }
 ```
+
+## Bibliography
+
+[1] CIRIM
+
+[2] Lønning, K. et al. (2019) ‘Recurrent inference machines for reconstructing heterogeneous MRI data’, Medical Image
+Analysis, 53, pp. 64–78. doi: 10.1016/j.media.2019.01.005.
+
+[3] Karkalousos, D. et al. (2020) ‘Reconstructing unseen modalities and pathology with an efficient Recurrent Inference
+Machine’, pp. 1–31. Available at: http://arxiv.org/abs/2012.07819.
+
+[4] Sriram, A. et al. (2020) ‘End-to-End Variational Networks for Accelerated MRI Reconstruction’, Lecture Notes in
+Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics),
+12262 LNCS, pp. 64–73. doi: 10.1007/978-3-030-59713-9_7.
+
+[5] Zbontar, J. et al. (2018) ‘fastMRI: An Open Dataset and Benchmarks for Accelerated MRI’, arXiv, pp. 1–35. Available
+at: http://arxiv.org/abs/1811.08839.
+
+[6] Ronneberger, O., Fischer, P. and Brox, T. (2015) ‘U-Net: Convolutional Networks for Biomedical Image Segmentation’,
+in Medical image computing and computer-assisted intervention : MICCAI ... International Conference on Medical Image
+Computing and Computer-Assisted Intervention, pp. 234–241. doi: 10.1007/978-3-319-24574-4_28.
+
+[7] Lustig, M. et al. (2008) ‘Compressed Sensing MRI’, IEEE Signal Processing Magazine, 25(2), pp. 72–82. doi:
+10.1109/MSP.2007.914728.
diff --git a/SECURITY.md b/SECURITY.md
index 034e8480..88f067a7 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -2,8 +2,8 @@

 ## Supported Versions

-Use this section to tell people about which versions of your project are
-currently being supported with security updates.
+Use this section to tell people about which versions of your project are currently being supported with security
+updates.

 | Version | Supported |
 | ------- | ------------------ |
@@ -16,6 +16,5 @@ currently being supported with security updates.

 Use this section to tell people how to report a vulnerability.

-Tell them where to go, how often they can expect to get an update on a
-reported vulnerability, what to expect if the vulnerability is accepted or
-declined, etc.
+Tell them where to go, how often they can expect to get an update on a reported vulnerability, what to expect if the
+vulnerability is accepted or declined, etc.
diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 00000000..d0c3cbf1 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 00000000..061f32f9 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 00000000..10acb276 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,65 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys + +# +sys.path.insert(0, os.path.abspath("../..")) + +# -- Project information ----------------------------------------------------- + +project = "mridc" +copyright = "2021, Dimitrios Karkalousos" +author = "Dimitrios Karkalousos" + +# The full version, including alpha/beta/rc tags +release = "v.0.0.1" + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.napoleon", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.mathjax", + "sphinx.ext.viewcode", + "sphinx.ext.githubpages", + "sphinx.ext.doctest", + "sphinx.ext.autosummary", + "sphinx.ext.autosectionlabel", +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. 
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = []
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = "alabaster"
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 00000000..c637561d
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,18 @@
+.. mridc documentation master file, created by
+   sphinx-quickstart on Tue Nov 30 01:20:12 2021.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to mridc's documentation!
+=================================
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/docs/source/modules.rst b/docs/source/modules.rst
new file mode 100644
index 00000000..af286e6a
--- /dev/null
+++ b/docs/source/modules.rst
@@ -0,0 +1,7 @@
+mridc
+=====
+
+.. toctree::
+   :maxdepth: 4
+
+   mridc
diff --git a/docs/source/mridc.data.rst b/docs/source/mridc.data.rst
new file mode 100644
index 00000000..b663c08a
--- /dev/null
+++ b/docs/source/mridc.data.rst
@@ -0,0 +1,45 @@
+mridc.data package
+==================
+
+Submodules
+----------
+
+mridc.data.mri\_data module
+---------------------------
+
+.. automodule:: mridc.data.mri_data
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mridc.data.subsample module
+---------------------------
+
+.. automodule:: mridc.data.subsample
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mridc.data.transforms module
+----------------------------
+
+.. automodule:: mridc.data.transforms
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mridc.data.volume\_sampler module
+---------------------------------
+
+.. automodule:: mridc.data.volume_sampler
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: mridc.data
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/mridc.nn.rim.rst b/docs/source/mridc.nn.rim.rst
new file mode 100644
index 00000000..ec056481
--- /dev/null
+++ b/docs/source/mridc.nn.rim.rst
@@ -0,0 +1,45 @@
+mridc.nn.rim package
+====================
+
+Submodules
+----------
+
+mridc.nn.rim.conv\_layers module
+--------------------------------
+
+.. automodule:: mridc.nn.rim.conv_layers
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mridc.nn.rim.rim\_block module
+------------------------------
+
+.. automodule:: mridc.nn.rim.rim_block
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mridc.nn.rim.rnn\_cells module
+------------------------------
+
+.. automodule:: mridc.nn.rim.rnn_cells
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mridc.nn.rim.utils module
+-------------------------
+
+.. automodule:: mridc.nn.rim.utils
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: mridc.nn.rim
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/mridc.nn.rst b/docs/source/mridc.nn.rst
new file mode 100644
index 00000000..f5a0fc42
--- /dev/null
+++ b/docs/source/mridc.nn.rst
@@ -0,0 +1,45 @@
+mridc.nn package
+================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   mridc.nn.rim
+
+Submodules
+----------
+
+mridc.nn.cirim module
+---------------------
+
+.. automodule:: mridc.nn.cirim
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mridc.nn.e2evn module
+---------------------
+
+.. automodule:: mridc.nn.e2evn
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mridc.nn.unet module
+--------------------
+
+.. automodule:: mridc.nn.unet
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: mridc.nn
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/mridc.rst b/docs/source/mridc.rst
new file mode 100644
index 00000000..601ccee8
--- /dev/null
+++ b/docs/source/mridc.rst
@@ -0,0 +1,54 @@
+mridc package
+=============
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   mridc.data
+   mridc.nn
+
+Submodules
+----------
+
+mridc.evaluate module
+---------------------
+
+.. automodule:: mridc.evaluate
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mridc.fft module
+----------------
+
+.. automodule:: mridc.fft
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mridc.losses module
+-------------------
+
+.. automodule:: mridc.losses
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mridc.utils module
+------------------
+
+.. automodule:: mridc.utils
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: mridc
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/mridc/data/mri_data.py b/mridc/data/mri_data.py
index a2899631..e7179e01 100644
--- a/mridc/data/mri_data.py
+++ b/mridc/data/mri_data.py
@@ -1,23 +1,23 @@
 # encoding: utf-8
 __author__ = "Dimitrios Karkalousos"
+
 # Parts of the code have been taken from https://github.com/facebookresearch/fastMRI

 import logging
 import os
-import yaml
 import random
 from pathlib import Path
 from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
-from xml.etree.ElementTree import Element

-from defusedxml.ElementTree import fromstring
 import h5py
 import numpy as np
 import torch
+import yaml
+from defusedxml.ElementTree import fromstring
 from torch.utils.data import Dataset


-def et_query(root: Element, qlist: Sequence[str], namespace: str = "https://www.ismrm.org/ISMRMRD") -> str:
+def et_query(root: str, qlist: Sequence[str], namespace: str = "https://www.ismrm.org/ISMRMRD") -> str:
     """
     Query an XML element for a list of attributes.
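For context, a hedged usage sketch of `et_query` against an ISMRMRD header: the file name is hypothetical, and the
element path mirrors the common fastMRI-style loader pattern; this is illustrative, not code from this patch
(`et_query` from the hunk above is assumed in scope).

```python
import h5py
from defusedxml.ElementTree import fromstring

with h5py.File("example_file.h5", "r") as hf:  # hypothetical fastMRI-style HDF5 file
    et_root = fromstring(hf["ismrmrd_header"][()])  # parse the stored ISMRMRD XML header
    # walk encoding -> encodedSpace -> matrixSize -> x to read the encoded matrix size
    enc_x = int(et_query(et_root, ["encoding", "encodedSpace", "matrixSize", "x"]))
```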
@@ -183,7 +183,7 @@ def __init__(
         # load dataset cache if we have and user wants to use it
         if self.dataset_cache_file.exists() and use_dataset_cache:
             with open(self.dataset_cache_file, "rb") as f:
-                dataset_cache = yaml.load(f)
+                dataset_cache = yaml.safe_load(f)
         else:
             dataset_cache = {}
diff --git a/mridc/data/transforms.py b/mridc/data/transforms.py
index 5e20cceb..f8c3a79c 100644
--- a/mridc/data/transforms.py
+++ b/mridc/data/transforms.py
@@ -1,5 +1,6 @@
 # encoding: utf-8
 __author__ = "Dimitrios Karkalousos"
+
 # Parts of the code have been taken from https://github.com/facebookresearch/fastMRI

 from typing import Dict, Optional, Sequence, Tuple, Union, Any, List
@@ -372,7 +373,9 @@ def __call__(
             )
         else:
             masked_kspace = kspace
-            acc = torch.tensor([np.around(mask.size / mask.sum())]) if mask is not None else torch.tensor([1])  # type: ignore
+            acc = (
+                torch.tensor([np.around(mask.size / mask.sum())]) if mask is not None else torch.tensor([1])
+            )  # type: ignore

         # Cropping after masking.
         if self.crop_size is not None and not self.crop_before_masking:
diff --git a/mridc/evaluate.py b/mridc/evaluate.py
index e30d0cef..c3a101b3 100644
--- a/mridc/evaluate.py
+++ b/mridc/evaluate.py
@@ -1,5 +1,6 @@
 # encoding: utf-8
 __author__ = "Dimitrios Karkalousos"
+
 # Parts of the code have been taken from https://github.com/facebookresearch/fastMRI

 import argparse
@@ -11,7 +12,6 @@
 import numpy as np
 import pandas as pd
 import torch
-
 from runstats import Statistics
 from skimage.filters import threshold_otsu
 from skimage.metrics import peak_signal_noise_ratio, structural_similarity
diff --git a/mridc/fft.py b/mridc/fft.py
index bb31d54d..1d763ea8 100644
--- a/mridc/fft.py
+++ b/mridc/fft.py
@@ -1,5 +1,6 @@
 # encoding: utf-8
 __author__ = "Dimitrios Karkalousos"
+
 # Parts of the code have been taken from https://github.com/facebookresearch/fastMRI

 from typing import List, Optional, Union
diff --git a/mridc/losses.py b/mridc/losses.py
index e7a074c9..95da7115 100644
--- a/mridc/losses.py
+++ b/mridc/losses.py
@@ -1,5 +1,6 @@
 # encoding: utf-8
 __author__ = "Dimitrios Karkalousos"
+
 # Parts of the code have been taken from https://github.com/facebookresearch/fastMRI

 import torch
diff --git a/mridc/nn/__init__.py b/mridc/nn/__init__.py
index 2399a7d9..0847b3c8 100644
--- a/mridc/nn/__init__.py
+++ b/mridc/nn/__init__.py
@@ -2,5 +2,5 @@
 __author__ = "Dimitrios Karkalousos"

 from mridc.nn.cirim import CIRIM
-from mridc.nn.unet import Unet
 from mridc.nn.e2evn import VarNet
+from mridc.nn.unet import Unet
diff --git a/mridc/nn/cirim.py b/mridc/nn/cirim.py
index c4d9050c..a3b16981 100644
--- a/mridc/nn/cirim.py
+++ b/mridc/nn/cirim.py
@@ -7,8 +7,8 @@
 from torch import nn

 from mridc import ifft2c, complex_mul, complex_conj
-from .rim.rim_block import RIMBlock
 from .e2evn import SensitivityModel
+from .rim.rim_block import RIMBlock
 from ..data.transforms import center_crop_to_smallest
diff --git a/mridc/nn/e2evn.py b/mridc/nn/e2evn.py
index dd54f720..bd8e48e8 100644
--- a/mridc/nn/e2evn.py
+++ b/mridc/nn/e2evn.py
@@ -1,5 +1,6 @@
 # encoding: utf-8
 __author__ = "Dimitrios Karkalousos"
+
 # Parts of the code have been taken from https://github.com/facebookresearch/fastMRI

 import math
diff --git a/mridc/nn/unet.py b/mridc/nn/unet.py
index cc9e8534..20f13e4e 100644
--- a/mridc/nn/unet.py
+++ b/mridc/nn/unet.py
@@ -1,5 +1,6 @@
 # encoding: utf-8
 __author__ = "Dimitrios Karkalousos"
+
 # Parts of the code have been taken from https://github.com/facebookresearch/fastMRI

 import torch
diff --git a/mridc/utils.py b/mridc/utils.py
index c4d35786..a3cce32c 100644
--- a/mridc/utils.py
+++ b/mridc/utils.py
@@ -1,5 +1,6 @@
 # encoding: utf-8
 __author__ = "Dimitrios Karkalousos"
+
 # Parts of the code have been taken from https://github.com/facebookresearch/fastMRI

 from pathlib import Path
diff --git a/requirements.txt b/requirements.txt
index 0c44c33b..ece1119f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,4 @@
+coverage>=6.1.2
 defusedxml~=0.7.1
 h5py~=3.6.0
 numpy~=1.21.4
diff --git a/scripts/README.md b/scripts/README.md
new file mode 100644
index 00000000..ad09207b
--- /dev/null
+++ b/scripts/README.md
@@ -0,0 +1,102 @@
+Examples for training and running reconstruction methods.
+
+## Cascades of Independently Recurrent Inference Machines (CIRIM)
+
+To enforce explicitly formulated Data Consistency, turn off the --no_dc option. For the CIRIM, it is preferable to
+keep it on.
+
+* ### **Train** a CIRIM with 8 cascades:
+  ```shell
+  python -m scripts.train_cirim --data_path data/ --exp_dir models/ --sense_path coil_sensitivity_maps/
+  --mask_type gaussian2d --accelerations 4 10 --center_fractions .7 .7 --num_cascades 8 --time_steps 8
+  --recurrent_layer IndRNN --conv_filters 64 64 2 --conv_kernels 5 3 3 --conv_dilations 1 2 1
+  --conv_bias True True False --recurrent_filters 64 64 0 --recurrent_kernels 3 3 3 --recurrent_dilations 1 1 0
+  --recurrent_bias True True False --depth 2 --conv_dim 2 --loss_fn "l1" --no_dc --keep_eta --output_type "SENSE"
+  --fft_type orthogonal --lr 0.001 --batch_size 1 --num_epochs 100 --num_workers 4
+  ```
+* ### **Run** a CIRIM with 8 cascades:
+  ```shell
+  python -m scripts.run_cirim --data_path data/ --checkpoint models/ --out_dir recons/
+  --sense_path coil_sensitivity_maps/
+  --mask_type gaussian2d --accelerations 4 10 --center_fractions .7 .7 --num_cascades 8 --time_steps 8
+  --recurrent_layer IndRNN --conv_filters 64 64 2 --conv_kernels 5 3 3 --conv_dilations 1 2 1
+  --conv_bias True True False --recurrent_filters 64 64 0 --recurrent_kernels 3 3 3 --recurrent_dilations 1 1 0
+  --recurrent_bias True True False --depth 2 --conv_dim 2 --loss_fn "l1" --no_dc --keep_eta --output_type "SENSE"
+  --fft_type orthogonal --lr 0.001 --batch_size 1 --num_epochs 100 --num_workers 4
+  ```
+
+## Recurrent Inference Machines (RIM)
+
+* ### **Train** a RIM:
+  ```shell
+  python -m scripts.train_cirim --data_path data/ --exp_dir models/ --sense_path coil_sensitivity_maps/
+  --mask_type gaussian2d --accelerations 4 10 --center_fractions .7 .7 --num_cascades 1 --time_steps 8
+  --recurrent_layer GRU --conv_filters 64 64 2 --conv_kernels 5 3 3 --conv_dilations 1 2 1
+  --conv_bias True True False --recurrent_filters 64 64 0 --recurrent_kernels 3 3 3 --recurrent_dilations 1 1 0
+  --recurrent_bias True True False --depth 2 --conv_dim 2 --loss_fn "l1" --no_dc --keep_eta --output_type "SENSE"
+  --fft_type orthogonal --lr 0.001 --batch_size 1 --num_epochs 100 --num_workers 4
+  ```
+* ### **Run** a RIM:
+  ```shell
+  python -m scripts.run_cirim --data_path data/ --checkpoint models/ --out_dir recons/
+  --sense_path coil_sensitivity_maps/
+  --mask_type gaussian2d --accelerations 4 10 --center_fractions .7 .7 --num_cascades 1 --time_steps 8
+  --recurrent_layer GRU --conv_filters 64 64 2 --conv_kernels 5 3 3 --conv_dilations 1 2 1
+  --conv_bias True True False --recurrent_filters 64 64 0 --recurrent_kernels 3 3 3 --recurrent_dilations 1 1 0
+  --recurrent_bias True True False --depth 2 --conv_dim 2 --loss_fn "l1" --no_dc --keep_eta --output_type "SENSE"
+  --fft_type orthogonal --lr 0.001 --batch_size 1 --num_epochs 100 --num_workers 4
+  ```
+
+## End-to-End Variational Network (E2EVN)
+
+To omit explicitly formulated Data Consistency, turn on the --no_dc option. For the E2EVN, it is preferable to keep
+it off.
+
+* ### **Train** an E2EVN with 12 cascades:
+  ```shell
+  python -m scripts.train_e2evn --data_path data/ --exp_dir models/ --sense_path coil_sensitivity_maps/
+  --mask_type gaussian2d --accelerations 4 10 --center_fractions .7 .7 --num_cascades 12 --pools 2 --chans 14
+  --unet_padding_size 11 --loss_fn "l1" --no_dc --keep_eta --output_type "SENSE" --fft_type orthogonal --lr 0.001
+  --batch_size 1 --num_epochs 100 --num_workers 4
+  ```
+* ### **Run** an E2EVN with 12 cascades:
+  ```shell
+  python -m scripts.run_e2evn --data_path data/ --checkpoint models/ --out_dir recons/
+  --sense_path coil_sensitivity_maps/
+  --mask_type gaussian2d --accelerations 4 10 --center_fractions .7 .7 --num_cascades 12 --pools 2 --chans 14
+  --unet_padding_size 11 --loss_fn "l1" --no_dc --keep_eta --output_type "SENSE" --fft_type orthogonal --lr 0.001
+  --batch_size 1 --num_epochs 100 --num_workers 4
+  ```
+
+## UNet
+
+* ### **Train** a UNet with 64 channels and without dropout:
+  ```shell
+  python -m scripts.train_unet --data_path data/ --exp_dir models/ --sense_path coil_sensitivity_maps/
+  --mask_type gaussian2d --accelerations 4 10 --center_fractions .7 .7 --in_chans 2 --out_chans 2 --chans 64
+  --num_pools 2 --unet_padding_size 11 --drop_prob 0.0 --loss_fn "l1" --no_dc --keep_eta --output_type "SENSE"
+  --fft_type orthogonal --lr 0.001 --batch_size 1 --num_epochs 100 --num_workers 4
+  ```
+* ### **Run** a UNet with 64 channels and without dropout:
+  ```shell
+  python -m scripts.run_unet --data_path data/ --checkpoint models/ --out_dir recons/
+  --sense_path coil_sensitivity_maps/
+  --mask_type gaussian2d --accelerations 4 10 --center_fractions .7 .7 --in_chans 2 --out_chans 2 --chans 64
+  --num_pools 2 --unet_padding_size 11 --drop_prob 0.0 --loss_fn "l1" --no_dc --keep_eta --output_type "SENSE"
+  --fft_type orthogonal --lr 0.001 --batch_size 1 --num_epochs 100 --num_workers 4
+  ```
+
+## Compressed Sensing
+
+* ### **Run** Parallel Imaging Compressed Sensing reconstruction:
+  ```shell
+  python -m scripts.run_pics --data_path data/ --out_dir recons/ --sense_path coil_sensitivity_maps/
+  --mask_type gaussian2d --accelerations 4 10 --center_fractions .7 .7 --num_iters 60 --reg_wt .005
+  --fft_type orthogonal --num_procs 4
+  ```
+
+## Zero-Filled
+
+* ### **Run** Zero-Filled SENSE reconstruction:
+  ```shell
+  python -m scripts.run_zf --data_path data/ --out_dir recons/ --sense_path coil_sensitivity_maps/
+  --mask_type gaussian2d --accelerations 4 10 --center_fractions .7 .7 --fft_type orthogonal --num_procs 4
+  ```
\ No newline at end of file
diff --git a/scripts/run_pics.py b/scripts/run_pics.py
index 8367de3c..3405d907 100644
--- a/scripts/run_pics.py
+++ b/scripts/run_pics.py
@@ -252,7 +252,6 @@ def create_arg_parser():
     parser = argparse.ArgumentParser(description="PICS")

     parser.add_argument("data_path", type=pathlib.Path, help="Path to the data folder")
-    parser.add_argument("checkpoint", type=pathlib.Path, help="Path to the checkpoint file")
     parser.add_argument("out_dir", type=pathlib.Path, help="Path to the output folder")
     parser.add_argument("--sense_path", type=pathlib.Path, help="Path to the sense folder")
     parser.add_argument("--mask_path", type=pathlib.Path, help="Path to the mask folder")
@@ -296,7 +295,6 @@ def create_arg_parser():
     )
parser.add_argument("--fft_type", type=str, default="orthogonal", help="Type of FFT to use") parser.add_argument("--progress_bar_refresh", type=int, default=10, help="Progress bar refresh rate") - parser.add_argument("--num_workers", type=int, default=4, help="Number of workers for the data loader") parser.add_argument( "--data_parallel", action="store_true", help="If set, use multiple GPUs using data parallelism" ) diff --git a/scripts/run_zf.py b/scripts/run_zf.py index 1a82d840..ee5916c7 100644 --- a/scripts/run_zf.py +++ b/scripts/run_zf.py @@ -124,7 +124,6 @@ def create_arg_parser(): parser = argparse.ArgumentParser(description="ZERO-FILLED") parser.add_argument("data_path", type=pathlib.Path, help="Path to the data folder") - parser.add_argument("checkpoint", type=pathlib.Path, help="Path to the checkpoint file") parser.add_argument("out_dir", type=pathlib.Path, help="Path to the output folder") parser.add_argument("--sense_path", type=pathlib.Path, help="Path to the sense folder") parser.add_argument("--mask_path", type=pathlib.Path, help="Path to the mask folder") diff --git a/scripts/train_cirim.py b/scripts/train_cirim.py index 1886c1e1..93b1cd47 100644 --- a/scripts/train_cirim.py +++ b/scripts/train_cirim.py @@ -733,7 +733,7 @@ def create_arg_parser(): help="Number of filters the recurrent layers of for the model", ) parser.add_argument( - "--recurrent_kernels", nargs="+", default=[1, 1, 0], help="Kernel size for the recurrent layers of the model" + "--recurrent_kernels", nargs="+", default=[3, 3, 3], help="Kernel size for the recurrent layers of the model" ) parser.add_argument( "--recurrent_dilations", nargs="+", default=[1, 1, 0], help="Dilations for the recurrent layers of the model" diff --git a/scripts/train_unet.py b/scripts/train_unet.py index ae109d0d..ec00a790 100644 --- a/scripts/train_unet.py +++ b/scripts/train_unet.py @@ -9,7 +9,7 @@ import random import sys import time -from typing import Any, Dict, List, Tuple, Union +from typing import Any, Dict, List, Tuple import numpy as np import torch diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..707787ba --- /dev/null +++ b/setup.py @@ -0,0 +1,12 @@ +from setuptools import setup + +setup( + name="mridc", + version="v.0.0.1", + packages=["mridc", "mridc.nn", "mridc.nn.rim", "mridc.data", "tests", "tests.fastmri", "tools", "scripts"], + url="https://github.com/wdika/mridc", + license="Apache-2.0 License ", + author="Dimitrios Karkalousos", + author_email="d.karkalousos@amsterdamumc.nl", + description="Data Consistency for Magnetic Resonance Imaging", +) diff --git a/tests/fastmri/conftest.py b/tests/fastmri/conftest.py index 43447d1c..f8b63231 100644 --- a/tests/fastmri/conftest.py +++ b/tests/fastmri/conftest.py @@ -1,5 +1,6 @@ # encoding: utf-8 __author__ = "Dimitrios Karkalousos" + # Parts of the code have been taken from https://github.com/facebookresearch/fastMRI import numpy as np diff --git a/tests/fastmri/create_temp_data.py b/tests/fastmri/create_temp_data.py index a7484cff..d81f595c 100644 --- a/tests/fastmri/create_temp_data.py +++ b/tests/fastmri/create_temp_data.py @@ -1,5 +1,6 @@ # encoding: utf-8 __author__ = "Dimitrios Karkalousos" + # Parts of the code have been taken from https://github.com/facebookresearch/fastMRI import h5py diff --git a/tests/fastmri/test_mri_data.py b/tests/fastmri/test_mri_data.py index 632e26c4..fa922a48 100644 --- a/tests/fastmri/test_mri_data.py +++ b/tests/fastmri/test_mri_data.py @@ -1,5 +1,6 @@ # encoding: utf-8 __author__ = "Dimitrios Karkalousos" + # 
Parts of the code have been taken from https://github.com/facebookresearch/fastMRI from mridc.data.mri_data import SliceDataset, CombinedSliceDataset diff --git a/tests/test_fftc.py b/tests/test_fftc.py index 5c912e78..f5395a23 100644 --- a/tests/test_fftc.py +++ b/tests/test_fftc.py @@ -1,5 +1,6 @@ # encoding: utf-8 __author__ = "Dimitrios Karkalousos" + # Parts of the code have been taken from https://github.com/facebookresearch/fastMRI import numpy as np diff --git a/tests/test_models.py b/tests/test_models.py index e37a34ad..49b43cec 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -1,5 +1,6 @@ # encoding: utf-8 __author__ = "Dimitrios Karkalousos" + # Parts of the code have been taken from https://github.com/facebookresearch/fastMRI import numpy as np @@ -57,7 +58,9 @@ def test_cirim(shape, cascades, center_fractions, accelerations): ) with torch.no_grad(): - y = torch.view_as_complex(next(cirim.inference(output, output, mask, accumulate_estimates=True))[0][-1]) + y = torch.view_as_complex( + next(cirim.inference(output, output, mask, accumulate_estimates=True))[0][-1] + ) # type: ignore if y.shape[1:] != x.shape[2:4]: raise AssertionError diff --git a/tests/test_subsample.py b/tests/test_subsample.py index c96e594c..5312b610 100644 --- a/tests/test_subsample.py +++ b/tests/test_subsample.py @@ -1,9 +1,8 @@ # encoding: utf-8 __author__ = "Dimitrios Karkalousos" -import pytest -import torch import numpy as np +import pytest from mridc.data.subsample import ( create_mask_for_mask_type, diff --git a/tests/test_transforms.py b/tests/test_transforms.py index dd117c5b..472c614e 100644 --- a/tests/test_transforms.py +++ b/tests/test_transforms.py @@ -1,9 +1,9 @@ # encoding: utf-8 __author__ = "Dimitrios Karkalousos" +import numpy as np import pytest import torch -import numpy as np from mridc.data.transforms import ( to_tensor, diff --git a/tools/README.md b/tools/README.md new file mode 100644 index 00000000..9a730ff5 --- /dev/null +++ b/tools/README.md @@ -0,0 +1,28 @@ +## Coil Sensitivity maps + +If you are working with data containing a fully-sampled center, you can estimate the coil sensitivity maps using the +estimate_csm.py script. + +## Evaluation + +Evaluate the performance of a method by comparing the reconstructions to the ground truth, using the +[reconstruction evaluation script](mridc/evaluate.py). + +Example for evaluating zero-filled reconstruction and returning the mean +/- std performance, + +```bash +python -m mridc.evaluate target_path/ predictions_path/ output_path/ --method zero-filled --acceleration 10 +--type mean_std +``` + +Example for evaluating zero-filled reconstruction and returning a csv with the scores per slice +(to use for further visualization), + +```bash +python -m mridc.evaluate target_path/ predictions_path/ output_path/ --method zero-filled --acceleration 10 +--type all_slices +``` + +## Visualize results + +Simple drop to png your reconstruction using the save2png.py script. \ No newline at end of file diff --git a/tools/estimate_csm.py b/tools/estimate_csm.py index 0b49da83..566ea944 100644 --- a/tools/estimate_csm.py +++ b/tools/estimate_csm.py @@ -6,18 +6,16 @@ import multiprocessing import pathlib import time -import numpy as np from collections import defaultdict - from typing import Any, Optional, Tuple +import bart import h5py +import numpy as np from mridc.data.mri_data import SliceDataset from mridc.data.transforms import tensor_to_complex_np, to_tensor -import bart - class DataTransform: """Data transform for BART."""
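
As a companion to the coil-sensitivity-maps note in tools/README above, here is a minimal sketch of one common
estimation approach: normalizing each coil image by the root-sum-of-squares (RSS) combination. It is illustrative
only, the function name is hypothetical, and it is not necessarily what estimate_csm.py implements.

```python
import numpy as np

def rss_csm(coil_images: np.ndarray, eps: float = 1e-8) -> np.ndarray:
    """Estimate coil sensitivity maps from coil images, e.g. reconstructed from a
    fully-sampled k-space center, by dividing each coil by the RSS combination.

    coil_images: complex array of shape (num_coils, height, width).
    """
    rss = np.sqrt(np.sum(np.abs(coil_images) ** 2, axis=0)) + eps  # coil-combined magnitude
    return coil_images / rss  # per-coil relative sensitivities
```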