diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml new file mode 100644 index 0000000000..e970f69774 --- /dev/null +++ b/.github/workflows/checks.yml @@ -0,0 +1,195 @@ +name: checks + +on: + pull_request: + types: [opened, synchronize] + branches: + - master + - dev + + + +jobs: + linux-clang-build: + + runs-on: ubuntu-latest + + env: + CFLAGS: -Werror + QT_SELECT: qt5 + + steps: + - uses: actions/checkout@v1 + + - name: install dependencies + run: | + sudo apt-get update + sudo apt-get install clang-8 libqt5opengl5-dev libqt5svg5-dev libglvnd-dev libeigen3-dev zlib1g-dev libfftw3-dev + + - name: configure + run: ./configure -assert || { cat configure.log; false; } + + - name: build + run: ./build -nowarnings -persistent -nopaginate || { cat build.log; false; } + + - name: unit tests + run: ./run_tests units || { cat testing_units.log; false; } + + - name: binary tests + run: ./run_tests binaries || { cat testing_binaries.log; false; } + + + + + + + linux-gcc-build: + + runs-on: ubuntu-latest + + env: + CXX: g++-9 + CFLAGS: -Werror + QT_SELECT: qt5 + + steps: + - uses: actions/checkout@v1 + + - name: install dependencies + run: | + sudo apt-get update + sudo apt-get install g++-9 libqt5opengl5-dev libqt5svg5-dev libglvnd-dev libeigen3-dev zlib1g-dev libfftw3-dev + + - name: configure + run: ./configure -nooptim || { cat configure.log; false; } + + - name: build + run: ./build -nowarnings -persistent -nopaginate || { cat build.log; false; } + + + + + + + macos-build: + + runs-on: macos-latest + + env: + CFLAGS: -Werror + + steps: + - uses: actions/checkout@v1 + + - name: install dependencies + run: | + brew update + brew install qt5 eigen pkg-config libtiff fftw libpng + brew link --force qt5 + + - name: configure + run: ./configure -assert || { cat configure.log; false; } + + - name: build + run: ./build -nowarnings -persistent -nopaginate || { cat build.log; false; } + + - name: unit tests + run: ./run_tests units || { cat testing_units.log; false; } + + - name: binary tests + run: ./run_tests binaries || { cat testing_binaries.log; false; } + + - name: check command documentation + run: ./docs/generate_user_docs.sh && git diff --exit-code docs/ + + + + + + windows-build: + + runs-on: windows-latest + + env: + CFLAGS: -Werror + MSYSTEM: MINGW64 + MSYSCON: defterm + CHERE_INVOKING: enabled_from_arguments + MSYS2_NOSTART: yes + + steps: + + - uses: actions/checkout@v1 + + - name: fetch and install MSYS2 + run: bash -c 'curl -sL https://github.com/MRtrix3/MinGW/releases/download/1.0/msys2.tar.{0,1} | tar xf -' + + - name: run qtbinpatcher + shell: cmd + run: msys64\msys2_shell.cmd -c "qtbinpatcher --qt-dir=$(dirname $(which qmake))" + + - name: configure + shell: cmd + run: msys64\msys2_shell.cmd -c "./configure -assert || { cat configure.log; false; }" + + - name: build + shell: cmd + run: msys64\msys2_shell.cmd -c "./build -nowarnings -persistent -nopaginate || { cat build.log; false; }" + + - name: unit tests + shell: cmd + run: msys64\msys2_shell.cmd -c "./run_tests units || { cat testing_units.log; false; }" + + - name: binary tests + shell: cmd + run: msys64\msys2_shell.cmd -c "./run_tests binaries || { cat testing_binaries.log; false; }" + + + + + secondary-checks: + + runs-on: ubuntu-latest + + env: + QT_SELECT: qt5 + + steps: + - uses: actions/checkout@v1 + + - name: install dependencies + run: | + sudo apt-get update + sudo apt-get install pylint pylint3 python3-sphinx sphinx-rtd-theme-common python3-recommonmark python3-sphinx-rtd-theme + + + - name: check 
syntax + run: ./check_syntax || { cat syntax.log; false; } + + - name: pylint (Python 2) + run: | + echo "__version__ = 'pylint testing' #pylint: disable=unused-variable" > ./lib/mrtrix3/_version.py + PYTHON=python2 ./run_pylint || { cat pylint.log; false; } + + - name: pylint (Python 3) + run: | + echo "__version__ = 'pylint testing' #pylint: disable=unused-variable" > ./lib/mrtrix3/_version.py + PYTHON=python3 ./run_pylint || { cat pylint.log; false; } + + - name: check building of documentation + run: python3 -m sphinx -n -N -W -w sphinx.log docs/ tmp/ + + - name: install build dependencies + run: sudo apt-get install clang-8 libqt5opengl5-dev libqt5svg5-dev libglvnd-dev libeigen3-dev zlib1g-dev libfftw3-dev + + - name: check configure with Python 2 + run: python2 ./configure || { cat configure.log; false; } + + - name: check build with Python 2 + run: python2 ./build -dryrun || { cat build.log; false; } + + - name: check configure with Python 3 + run: python3 ./configure || { cat configure.log; false; } + + - name: check build with Python 3 + run: python3 ./build -dryrun || { cat build.log; false; } diff --git a/.github/workflows/package-linux-anaconda.yml b/.github/workflows/package-linux-anaconda.yml new file mode 100644 index 0000000000..ab66b33371 --- /dev/null +++ b/.github/workflows/package-linux-anaconda.yml @@ -0,0 +1,33 @@ +name: package-linux-anaconda + +on: + release: + types: [created] + +jobs: + package: + + runs-on: ubuntu-latest + + steps: + + - name: install conda-build # and anaconda-client + run: $CONDA/bin/conda install -y conda-build #anaconda-client + + - name: fetch recipe + run: | + git clone https://github.com/MRtrix3/conda-build.git + mv conda-build/* . + + - name: build package + run: ./run.sh ${GITHUB_REF#refs/*/} ${GITHUB_REPOSITORY%/*} conda-linux + +# - name: upload package to Anaconda.org +# run: $CONDA/bin/anaconda -t ${{ secrets.ANACONDA_TOKEN }} upload -u MRtrix3 $(sudo $CONDA/bin/conda build conda-build/ --output) + + - name: Upload package to GitHub Release + uses: AButler/upload-release-assets@v2.0 + with: + files: '*.tar.bz2' + repo-token: ${{ secrets.GITHUB_TOKEN }} + diff --git a/.github/workflows/package-macos-anaconda.yml b/.github/workflows/package-macos-anaconda.yml new file mode 100644 index 0000000000..68189eaa63 --- /dev/null +++ b/.github/workflows/package-macos-anaconda.yml @@ -0,0 +1,38 @@ +name: package-macos-anaconda + +on: + release: + types: [created] + +jobs: + package: + + runs-on: macos-latest + + steps: + + - name: fetch MacOSX 10.11 SDK + run: curl -L https://github.com/phracker/MacOSX-SDKs/releases/download/MacOSX10.11.sdk/MacOSX10.11.sdk.tar.xz | sudo tar xf - -C /opt/ + + - name: install conda-build # and anaconda-client + run: sudo $CONDA/bin/conda install -y conda-build # anaconda-client + + - name: fetch recipe + run: | + git clone https://github.com/MRtrix3/conda-build.git + mv conda-build/* . 
+ { echo "CONDA_BUILD_SYSROOT:"; echo " - /opt/MacOSX10.11.sdk # [osx]"; } > conda_build_config.yaml + + - name: build package + run: sudo CONDA="$CONDA" ./run.sh ${GITHUB_REF#refs/*/} ${GITHUB_REPOSITORY%/*} conda-macos + +# - name: upload package +# run: $CONDA/bin/anaconda -t ${{ secrets.ANACONDA_TOKEN }} upload -u MRtrix3 $(sudo $CONDA/bin/conda build conda-build/ --output) + + - name: Upload package to GitHub Release + uses: AButler/upload-release-assets@v2.0 + with: + files: '*.tar.bz2' + repo-token: ${{ secrets.GITHUB_TOKEN }} + + diff --git a/.github/workflows/package-macos-native.yml b/.github/workflows/package-macos-native.yml new file mode 100644 index 0000000000..bd31b64e87 --- /dev/null +++ b/.github/workflows/package-macos-native.yml @@ -0,0 +1,27 @@ +name: package-macos-native + +on: + release: + types: [created] + +jobs: + package: + + runs-on: macos-latest + + steps: + + - name: fetch recipe + run: git clone https://github.com/MRtrix3/macos-installer.git + + - name: build packages + run: ./build ${GITHUB_REF#refs/*/} + working-directory: ./macos-installer + + - name: Upload package to GitHub Release + uses: AButler/upload-release-assets@v2.0 + with: + files: 'macos-installer/mrtrix3-macos-*.tar.gz' + repo-token: ${{ secrets.GITHUB_TOKEN }} + + diff --git a/.github/workflows/package-windows-msys2.yml b/.github/workflows/package-windows-msys2.yml new file mode 100644 index 0000000000..7ac55ecbe3 --- /dev/null +++ b/.github/workflows/package-windows-msys2.yml @@ -0,0 +1,40 @@ +name: package-windows-msys2 + +on: + release: + types: [created] + + +jobs: + package: + + runs-on: windows-latest + + env: + MSYSTEM: MINGW64 + MSYSCON: defterm + CHERE_INVOKING: enabled_from_arguments + MSYS2_NOSTART: yes + + steps: + + - name: fetch and install MSYS2 + run: bash -c 'curl -sL https://github.com/MRtrix3/MinGW/releases/download/1.0/msys2.tar.{0,1} | tar xf -' + + - name: run qtbinpatcher + shell: cmd + run: msys64\msys2_shell.cmd -c "qtbinpatcher --qt-dir=$(dirname $(which qmake))" + + - name: fetch PKGBUILD + shell: cmd + run: msys64\msys2_shell.cmd -c "git clone https://github.com/MRtrix3/MinGW.git && mv MinGW/* ." 
+ + - name: run makepkg + shell: cmd + run: msys64\msys2_shell.cmd -c "./run.sh ${GITHUB_REF#refs/*/} ${GITHUB_REPOSITORY%%/*}" + + - name: Upload package to GitHub Release + uses: AButler/upload-release-assets@v2.0 + with: + files: '*.tar.xz' + repo-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 090f1149f8..130a8950c9 100644 --- a/.gitignore +++ b/.gitignore @@ -31,6 +31,7 @@ build.* /dev/ /bin/ /tmp/ +/.vscode/ .cproject .idea .project @@ -43,6 +44,7 @@ build.* *.creator.user *.files *.includes +*.code-workspace /debug/ /assert/ testing.log @@ -51,3 +53,5 @@ testing/bin/ testing/src/project_version.cpp testing/src/project_version.h .__tmp.log +.check_syntax.tmp +.check_syntax2.tmp diff --git a/.gitmodules b/.gitmodules index 569a34903d..6cb9f76d98 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "testing/data"] - path = testing/data + path = testing/binaries/data url = https://github.com/MRtrix3/test_data +[submodule "testing/script_data"] + path = testing/scripts/data + url = https://github.com/MRtrix3/script_test_data.git diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 5fe714f639..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,136 +0,0 @@ -language: cpp -cache: - apt: true - packages: true - directories: - - $HOME/.cache/pip -addons: - apt: - sources: - - ubuntu-toolchain-r-test - - llvm-toolchain-xenial-8 - packages: - - g++-9 - - clang-8 - - zlib1g-dev - - libqt4-opengl-dev - - python3 - - python-pip - - python3-pip - - python3-setuptools -matrix: - include: - ################################################# - # Run a single OSX job to test compilation only # - ################################################# - - os: osx - env: CFLAGS="-Werror" TRAVIS_CXX=clang++ py=python test=build - ############################################### - # All subsequent tests are performed on Linux # - ############################################### - ##################################################################### - # Build binaries, run tests and check documentation: Clang, Python3 # - # (Note: Run this one first, since it'll be the longest job) # - ##################################################################### - - os: linux - env: CFLAGS="-Werror" TRAVIS_CXX=clang++-8 py=python3 test=run - ####################################################################### - # Build binaries (without optimisation): GCC, Python2 # - # (Also ensures both Python 2 and 3 are tested for configure & build) # - ####################################################################### - - os: linux - env: CFLAGS="-Werror" TRAVIS_CXX=g++-9 py=python2 test=build - ####################################################################### - # Generate documentation through Sphinx; Use both Python2 and Python3 # - ####################################################################### - - os: linux - env: py=python2 test=sphinx - - os: linux - env: py=python3 test=sphinx - ########################################################### - # Run the check_syntax script; only needs to be done once # - ########################################################### - - os: linux - env: test=syntax - ############################################## - # Run PyLint tests; both Python2 and Python3 # - ############################################## - - os: linux - env: py=python2 test=pylint - - os: linux - env: py=python3 test=pylint -install: - ################################################## - # OSX build requires explicit installation of QT # - 
################################################## - - | - if [[ "${TRAVIS_OS_NAME}" == "osx" ]]; then - brew update - brew install qt5 - brew link --force qt5 - fi - ######################################### - # Installation requirements for MRtrix3 # - ######################################### - - export NUMBER_OF_PROCESSORS=2 - - export PATH=`pwd`/bin:${PATH} - - export PYTHONPATH=`pwd`/lib:${PYTHONPATH} - - | - if [[ "${test}" == "sphinx" ]]; then - if [[ "${py}" == "python2" ]]; then - pip install urllib3[secure]; - pip install --user recommonmark sphinx sphinx-rtd-theme typing; - else - pip3 install --user recommonmark sphinx sphinx-rtd-theme typing; - fi - fi - - | - if [[ "${test}" == "pylint" ]]; then - if [[ "${py}" == "python2" ]]; then - pip install --user pylint; - else - pip3 install --user --upgrade setuptools; - pip3 install --user pylint; - fi - fi - - | - if [[ "${test}" == "build" || "${test}" == "run" ]]; then - export EIGEN_CFLAGS="-idirafter `pwd`/../eigen"; - ( cd ..; git clone https://github.com/eigenteam/eigen-git-mirror.git eigen; cd eigen; git checkout branches/3.3 ) - fi -before_script: - ####################################################################################################################### - # TravisCI sets CXX based on what it thinks the compiler should be; therefore we need to set it as late as possible # - # Since this is not needed for the installation section, only for the travis.sh script, moved this to "before_script" # - ####################################################################################################################### - - | - if [[ -n "${TRAVIS_CXX}" ]]; then - export CXX=${TRAVIS_CXX} - fi -script: - - ./travis.sh -before_cache: - - rm -f $HOME/.cache/pip/log/debug.log -after_failure: - - | - case "${test}" in # CLICK HERE TO FIND REPORTED ERRORS - "sphinx") - cat sphinx.log - ;; - "syntax") - cat syntax.log - ;; - "pylint") - cat pylint.log - ;; - "build") - cat configure.log - cat build.log - ;; - *) - cat configure.log - cat build.log - cat testing.log - cat gitdiff.log - esac - - sleep 10 diff --git a/README.md b/README.md index 3fb71e23b9..4d278b5794 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,12 @@ # MRtrix -[![Build Status](https://travis-ci.org/MRtrix3/mrtrix3.svg?branch=updated_syntax)](https://travis-ci.org/MRtrix3/mrtrix3) +[![Build Status](https://github.com/MRtrix3/mrtrix3/workflows/checks/badge.svg)](https://github.com/MRtrix3/mrtrix3/actions) Please visit the [official website for MRtrix](http://www.mrtrix.org) to access the [documentation for MRtrix3](http://mrtrix.readthedocs.org/), including detailed installation instructions. ## Getting help -Support and general discussion is hosted on the [MRtrix3 Community Forum](http://community.mrtrix.org/). Please -address all MRtrix3-related queries there. You can use you GitHub or Google login to post questions. +Support and general discussion is hosted on the [MRtrix3 Community Forum](http://community.mrtrix.org/). Please also look through the Frequently Asked Questions on the [wiki section of the forum](http://community.mrtrix.org/c/wiki). You can address all MRtrix3-related queries there, using your GitHub or Google login to post questions. 
## Quick install diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 9276ba731c..0000000000 --- a/appveyor.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: 1.0.{build} -image: Visual Studio 2017 -platform: x64 -configuration: Release -clone_folder: c:\msys64\mrtrix3 -shallow_clone: false - -build_script: - - cmd: echo "empty build to allow passing on master branch" - diff --git a/bin/5ttgen b/bin/5ttgen index 3414b1c889..502fefab03 100755 --- a/bin/5ttgen +++ b/bin/5ttgen @@ -1,5 +1,20 @@ #!/usr/bin/env python +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + # Script that generates a five-tissue-type (5TT) segmented image: the format appropriate for ACT # # In this script, major stages of processing can be performed in one of two ways: @@ -9,48 +24,47 @@ # defining the appropriate functions; 5ttgen will automatically make that algorithm available at the command-line -# Make the corresponding MRtrix3 Python libraries available -import inspect, os, sys -lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib')) -if not os.path.isdir(lib_folder): - sys.stderr.write('Unable to locate MRtrix3 Python libraries') - sys.exit(1) -sys.path.insert(0, lib_folder) +def usage(cmdline): #pylint: disable=unused-variable + from mrtrix3 import algorithm #pylint: disable=no-name-in-module, import-outside-toplevel + + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Generate a 5TT image suitable for ACT') + cmdline.add_citation('Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. Anatomically-constrained tractography: Improved diffusion MRI streamlines tractography through effective use of anatomical information. NeuroImage, 2012, 62, 1924-1938') + cmdline.add_description('5ttgen acts as a \'master\' script for generating a five-tissue-type (5TT) segmented tissue image suitable for use in Anatomically-Constrained Tractography (ACT). A range of different algorithms are available for completing this task. When using this script, the name of the algorithm to be used must appear as the first argument on the command-line after \'5ttgen\'. The subsequent compulsory arguments and options available depend on the particular algorithm being invoked.') + cmdline.add_description('Each algorithm available also has its own help page, including necessary references; e.g. 
to see the help page of the \'fsl\' algorithm, type \'5ttgen fsl\'.') + + common_options = cmdline.add_argument_group('Options common to all 5ttgen algorithms') + common_options.add_argument('-nocrop', action='store_true', default=False, help='Do NOT crop the resulting 5TT image to reduce its size (keep the same dimensions as the input image)') + common_options.add_argument('-sgm_amyg_hipp', action='store_true', default=False, help='Represent the amygdalae and hippocampi as sub-cortical grey matter in the 5TT image') + + # Import the command-line settings for all algorithms found in the relevant directory + algorithm.usage(cmdline) -from mrtrix3 import algorithm, app, path, run -app.init('Robert E. Smith (robert.smith@florey.edu.au)', 'Generate a 5TT image suitable for ACT') -app.cmdline.addCitation('', 'Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. Anatomically-constrained tractography: Improved diffusion MRI streamlines tractography through effective use of anatomical information. NeuroImage, 2012, 62, 1924-1938', False) -app.cmdline.addDescription('5ttgen acts as a \'master\' script for generating a five-tissue-type (5TT) segmented tissue image suitable for use in Anatomically-Constrained Tractography (ACT). A range of different algorithms are available for completing this task. When using this script, the name of the algorithm to be used must appear as the first argument on the command-line after \'5ttgen\'. The subsequent compulsory arguments and options available depend on the particular algorithm being invoked.') -app.cmdline.addDescription('Each algorithm available also has its own help page, including necessary references; e.g. to see the help page of the \'fsl\' algorithm, type \'5ttgen fsl\'.') -common_options = app.cmdline.add_argument_group('Options common to all 5ttgen algorithms') -common_options.add_argument('-nocrop', action='store_true', default=False, help='Do NOT crop the resulting 5TT image to reduce its size (keep the same dimensions as the input image)') -common_options.add_argument('-sgm_amyg_hipp', action='store_true', default=False, help='Represent the amygdalae and hippocampi as sub-cortical grey matter in the 5TT image') +def execute(): #pylint: disable=unused-variable + from mrtrix3 import algorithm, app, run #pylint: disable=no-name-in-module, import-outside-toplevel -# Import the command-line settings for all algorithms found in the relevant directory -algorithm.initialise() + # Find out which algorithm the user has requested + alg = algorithm.get_module(app.ARGS.algorithm) -app.parse() + alg.check_output_paths() -# Find out which algorithm the user has requested -alg = algorithm.getModule(app.args.algorithm) + app.make_scratch_dir() + alg.get_inputs() + app.goto_scratch_dir() -app.checkOutputPath(app.args.output) -alg.checkOutputPaths() + alg.execute() -app.makeTempDir() -alg.getInputs() -app.gotoTempDir() + stderr = run.command('5ttcheck result.mif').stderr + if '[WARNING]' in stderr: + app.warn('Generated image does not perfectly conform to 5TT format:') + for line in stderr.splitlines(): + app.warn(line) -alg.execute() -stderr = run.command('5ttcheck result.mif')[1] -if stderr: - app.warn('Generated image does not perfectly conform to 5TT format:') - for line in stderr: - app.warn(line) -run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.forceOverwrite else '')) -app.complete() +# Execute the script +import mrtrix3 +mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/average_response 
b/bin/average_response deleted file mode 100755 index 91ac18339a..0000000000 --- a/bin/average_response +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python -import sys - -def errorMessage(msg): - sys.stderr.write(msg + '\n') - sys.exit(1) - -if len(sys.argv) < 3 : - sys.stderr.write("example usage: average_response input_response1.txt input_response2.txt input_response3.txt ... output_average_response.txt\n") - sys.exit(1) - -num_bvals = 0 -num_coeff = 0 -num_subjects = len(sys.argv) - 2 -sys.stdout.write("Number of subjects: " + str(num_subjects) + '\n') - -for i in range(1, num_subjects + 1): - with open(sys.argv[i], 'r') as f: - lines = f.readlines() - if i == 1: - num_bvals = len(lines) - num_coeff = len(lines[0].split()) - for l in lines: - if len(l.split()) != num_coeff: - errorMessage("Error in file " + sys.argv[i] + ": multi-shell response functions must have the same number of coefficients per b-value (line)") - else: - if len(lines) != num_bvals: - errorMessage("Error in file " + sys.argv[i] + ": Input files do not contain the same number of b-values") - for l in lines: - if len(l.split()) != num_coeff: - errorMessage("Error in file " + sys.argv[i] + ": multi-shell response functions must have the same number of coefficients per b-value (line)") - -sys.stdout.write("Number of b-values: " + str(num_bvals) + '\n') - -# Can't use multiplication operator in outer loop since it duplicates by reference rather than value -average_response = [[0] * num_coeff for _ in range(num_bvals)] #pylint: disable=unused-variable -for i in range(1, num_subjects + 1): - with open(sys.argv[i], 'r') as f: - lines = f.readlines() - for l in range(0, num_bvals): - coeff = lines[l].split() - for c in range(0, num_coeff): - average_response[l][c] = average_response[l][c] + float(coeff[c]) / float(num_subjects) - -f = open(sys.argv[len(sys.argv)-1], 'w') -line_out = '' -for l in range(num_bvals): - line_out = '' - for c in range(0, num_coeff): - line_out = line_out + "{0:.5f}".format(average_response[l][c]) + ' ' - f.write("%s\n" % line_out) diff --git a/bin/blend b/bin/blend index 1677b074a4..b6ba2251a4 100755 --- a/bin/blend +++ b/bin/blend @@ -1,4 +1,20 @@ #!/usr/bin/env python2 + +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
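A note on the crossfade arithmetic in the blend script body that follows: the tail of the first folder's frames is cross-faded into the head of the second folder's frames over the overlap region, with the blend weight stepping up linearly in increments of 100/NUM_OVERLAP percent. A minimal sketch of that weight schedule, using a hypothetical overlap value rather than anything read from the command line:

# Sketch only: reproduces the blend-weight arithmetic used in the loop below,
# with a hypothetical NUM_OVERLAP of 4 (the real script reads it from sys.argv[3]).
NUM_OVERLAP = 4
for i2 in range(NUM_OVERLAP - 1):   # as the loop bounds below read, NUM_OVERLAP - 1 blended frames are produced
  blend_amount = 100 * float(i2 + 1) / float(NUM_OVERLAP)
  print(blend_amount)               # 25.0, 50.0, 75.0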
+ import os import sys @@ -8,25 +24,25 @@ if len(sys.argv) <= 1: sys.stderr.write('eg: blend folder1 folder2 20 output_folder\n') sys.exit(1) -input_folder1 = sys.argv[1] -input_folder2 = sys.argv[2] -file_list1 = sorted(os.listdir(input_folder1)) -file_list2 = sorted(os.listdir(input_folder2)) -num_overlap = int(sys.argv[3]) -output_folder = sys.argv[4] +INPUT_FOLDER_1 = sys.argv[1] +INPUT_FOLDER_2 = sys.argv[2] +FILE_LIST_1 = sorted(os.listdir(INPUT_FOLDER_1)) +FILE_LIST_2 = sorted(os.listdir(INPUT_FOLDER_2)) +NUM_OVERLAP = int(sys.argv[3]) +OUTPUT_FOLDER = sys.argv[4] -if not os.path.exists(output_folder): - os.mkdir(output_folder) +if not os.path.exists(OUTPUT_FOLDER): + os.mkdir(OUTPUT_FOLDER) -total_num_output_frames = len(file_list1) + len(file_list2) - num_overlap -for i in range(total_num_output_frames): +NUM_OUTPUT_FRAMES = len(FILE_LIST_1) + len(FILE_LIST_2) - NUM_OVERLAP +for i in range(NUM_OUTPUT_FRAMES): file_name = 'frame' + '%0*d' % (5, i) + '.png' - if i <= len(file_list1) - num_overlap: - os.system('cp -L ' + input_folder1 + '/' + file_list1[i] + ' ' + output_folder + '/' + file_name) - if len(file_list1) - num_overlap < i < len(file_list1): - i2 = i - (len(file_list1) - num_overlap) - 1 - blend_amount = 100 * float(i2 + 1) / float(num_overlap) - os.system('convert ' + input_folder1 + '/' + file_list1[i] + ' ' + input_folder2 + '/' + file_list2[i2] + ' -alpha on -compose blend -define compose:args=' + str(blend_amount) + ' -gravity South -composite ' + output_folder + '/' + file_name) - if i >= (len(file_list1)): - i2 = i - (len(file_list1) - num_overlap) - 1 - os.system('cp -L ' + input_folder2 + '/' + file_list2[i2] + ' ' + output_folder + '/' + file_name) + if i <= len(FILE_LIST_1) - NUM_OVERLAP: + os.system('cp -L ' + INPUT_FOLDER_1 + '/' + FILE_LIST_1[i] + ' ' + OUTPUT_FOLDER + '/' + file_name) + if len(FILE_LIST_1) - NUM_OVERLAP < i < len(FILE_LIST_1): + i2 = i - (len(FILE_LIST_1) - NUM_OVERLAP) - 1 + blend_amount = 100 * float(i2 + 1) / float(NUM_OVERLAP) + os.system('convert ' + INPUT_FOLDER_1 + '/' + FILE_LIST_1[i] + ' ' + INPUT_FOLDER_2 + '/' + FILE_LIST_2[i2] + ' -alpha on -compose blend -define compose:args=' + str(blend_amount) + ' -gravity South -composite ' + OUTPUT_FOLDER + '/' + file_name) + if i >= (len(FILE_LIST_1)): + i2 = i - (len(FILE_LIST_1) - NUM_OVERLAP) - 1 + os.system('cp -L ' + INPUT_FOLDER_2 + '/' + FILE_LIST_2[i2] + ' ' + OUTPUT_FOLDER + '/' + file_name) diff --git a/bin/convert_bruker b/bin/convert_bruker index ecd39f9253..2269f37b9a 100755 --- a/bin/convert_bruker +++ b/bin/convert_bruker @@ -1,5 +1,20 @@ #!/usr/bin/env python +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
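For readers unfamiliar with the Bruker parameter files that convert_bruker reads below: the script splits the reco, acqp and method files on the '##$' delimiter, then takes values either from after the '=' sign or from the line that follows the key. A minimal sketch of that same pattern on a made-up entry (the sample text is illustrative only, not copied from a real Bruker dataset):

# Illustrative only: mirrors the split-on-'##$' parsing used in the script below.
# 'sample' stands in for one chunk produced by file_reco.read().split('##$'),
# already lower-cased as in the main loop.
sample = 'reco_size=( 2 )\n96 96\n'

if sample.startswith('reco_size='):
  mat_size = sample.splitlines()[1].split()   # values sit on the line after the key
  print(mat_size)                             # ['96', '96']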
+ import sys, os.path if len (sys.argv) != 3: @@ -15,108 +30,114 @@ if not sys.argv[2].endswith ('.mih'): sys.stderr.write("expected .mih suffix as the second argument\n") sys.exit (1) -with open (os.path.join (os.path.dirname (sys.argv[1]), 'reco')) as f: - lines = f.read().split ('##$') - -with open (os.path.join (os.path.dirname (sys.argv[1]), '../../acqp')) as f: - lines += f.read().split ('##$') - -with open (os.path.join (os.path.dirname (sys.argv[1]), '../../method')) as f: - lines += f.read().split ('##$') - - -for line in lines: - line = line.lower() - if line.startswith ('reco_size='): - mat_size = line.splitlines()[1].split() - print ('mat_size', mat_size) - elif line.startswith ('nslices='): - nslices = line.split('=')[1].split()[0] - print ('nslices', nslices) - elif line.startswith ('acq_time_points='): - nacq = len (line.split('\n',1)[1].split()) - print ('nacq', nacq) - elif line.startswith ('reco_wordtype='): - wtype = line.split('=')[1].split()[0] - print ('wtype', wtype) - elif line.startswith ('reco_byte_order='): - byteorder = line.split('=')[1].split()[0] - print ('byteorder', byteorder) - elif line.startswith ('pvm_spatresol='): - res = line.splitlines()[1].split() - print ('res', res) - elif line.startswith ('pvm_spackarrslicedistance='): - slicethick = line.splitlines()[1].split()[0] - print ('slicethick', slicethick) - elif line.startswith ('pvm_dweffbval='): - bval = line.split('\n',1)[1].split() - print ('bval', bval) - elif line.startswith ('pvm_dwgradvec='): - bvec = line.split('\n',1)[1].split() - print ('bvec', bvec) - - -with open (sys.argv[2], 'w') as f: - f.write ('mrtrix image\ndim: ' + mat_size[0] + ',' + mat_size[1]) - if len(mat_size) > 2: - f.write (',' + str(mat_size[2])) - else: + + +def main(): + + with open (os.path.join (os.path.dirname (sys.argv[1]), 'reco')) as file_reco: + lines = file_reco.read().split ('##$') + + with open (os.path.join (os.path.dirname (sys.argv[1]), '../../acqp')) as file_acqp: + lines += file_acqp.read().split ('##$') + + with open (os.path.join (os.path.dirname (sys.argv[1]), '../../method')) as file_method: + lines += file_method.read().split ('##$') + + + for line in lines: + line = line.lower() + if line.startswith ('reco_size='): + mat_size = line.splitlines()[1].split() + print ('mat_size', mat_size) + elif line.startswith ('nslices='): + nslices = line.split('=')[1].split()[0] + print ('nslices', nslices) + elif line.startswith ('acq_time_points='): + nacq = len (line.split('\n',1)[1].split()) + print ('nacq', nacq) + elif line.startswith ('reco_wordtype='): + wtype = line.split('=')[1].split()[0] + print ('wtype', wtype) + elif line.startswith ('reco_byte_order='): + byteorder = line.split('=')[1].split()[0] + print ('byteorder', byteorder) + elif line.startswith ('pvm_spatresol='): + res = line.splitlines()[1].split() + print ('res', res) + elif line.startswith ('pvm_spackarrslicedistance='): + slicethick = line.splitlines()[1].split()[0] + print ('slicethick', slicethick) + elif line.startswith ('pvm_dweffbval='): + bval = line.split('\n',1)[1].split() + print ('bval', bval) + elif line.startswith ('pvm_dwgradvec='): + bvec = line.split('\n',1)[1].split() + print ('bvec', bvec) + + + with open (sys.argv[2], 'w') as file_out: + file_out.write ('mrtrix image\ndim: ' + mat_size[0] + ',' + mat_size[1]) + if len(mat_size) > 2: + file_out.write (',' + str(mat_size[2])) + else: + try: + nslices #pylint: disable=pointless-statement + file_out.write (',' + str(nslices)) + except: + pass + + try: + nacq #pylint: 
disable=pointless-statement + file_out.write (',' + str(nacq)) + except: + pass + + file_out.write ('\nvox: ' + str(res[0]) + ',' + str(res[1])) + if len(res) > 2: + file_out.write (',' + str(res[2])) + else: + try: + slicethick #pylint: disable=pointless-statement + file_out.write (',' + str(slicethick)) + except: + pass + try: + nacq #pylint: disable=pointless-statement + file_out.write (',') + except: + pass + + file_out.write ('\ndatatype: ') + if wtype == '_16bit_sgn_int': + file_out.write ('int16') + elif wtype == '_32bit_sgn_int': + file_out.write ('int32') + + if byteorder=='littleendian': + file_out.write ('le') + else: + file_out.write ('be') + + file_out.write ('\nlayout: +0,+1') try: nslices #pylint: disable=pointless-statement - f.write (',' + str(nslices)) + file_out.write (',+2') + except: + pass + try: + nacq #pylint: disable=pointless-statement + file_out.write (',+3') except: pass - try: - nacq #pylint: disable=pointless-statement - f.write (',' + str(nacq)) - except: - pass + file_out.write ('\nfile: ' + sys.argv[1] + '\n') - f.write ('\nvox: ' + str(res[0]) + ',' + str(res[1])) - if len(res) > 2: - f.write (',' + str(res[2])) - else: try: - slicethick #pylint: disable=pointless-statement - f.write (',' + str(slicethick)) + assert len(bvec) == 3*len(bval) + bvec = [ bvec[n:n+3] for n in range(0,len(bval),3) ] + for direction, value in zip(bvec, bval): + file_out.write ('dw_scheme: ' + direction[0] + ',' + direction[1] + ',' + str(-float(direction[2])) + ',' + value + '\n') except: pass - try: - nacq #pylint: disable=pointless-statement - f.write (',') - except: - pass - - f.write ('\ndatatype: ') - if wtype == '_16bit_sgn_int': - f.write ('int16') - elif wtype == '_32bit_sgn_int': - f.write ('int32') - - if byteorder=='littleendian': - f.write ('le') - else: - f.write ('be') - - f.write ('\nlayout: +0,+1') - try: - nslices #pylint: disable=pointless-statement - f.write (',+2') - except: - pass - try: - nacq #pylint: disable=pointless-statement - f.write (',+3') - except: - pass - - f.write ('\nfile: ' + sys.argv[1] + '\n') - - try: - assert len(bvec) == 3*len(bval) - bvec = [ bvec[n:n+3] for n in range(0,len(bval),3) ] - for direction, value in zip(bvec, bval): - f.write ('dw_scheme: ' + direction[0] + ',' + direction[1] + ',' + str(-float(direction[2])) + ',' + value + '\n') - except: - pass + +main() diff --git a/bin/dwi2response b/bin/dwi2response index 4255bc40ce..1bbb6e158c 100755 --- a/bin/dwi2response +++ b/bin/dwi2response @@ -1,113 +1,127 @@ #!/usr/bin/env python +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + # Script for estimating response functions for spherical deconvolution # A number of different approaches are available within this script for performing response function estimation. 
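The dwi2response rewrite below, like the 5ttgen rewrite above, moves the script from module-level procedural code to a pair of functions plus a single call into the mrtrix3 package. A minimal sketch of that layout, inferred from the scripts in this diff (the author string, argument names and the mrconvert call are placeholders, not part of any particular command):

#!/usr/bin/env python
# Sketch of the script skeleton used by the refactored MRtrix3 Python commands in this diff.

def usage(cmdline): #pylint: disable=unused-variable
  # Declare the interface on the parser object handed in by the library
  cmdline.set_author('A. Contributor (placeholder)')
  cmdline.set_synopsis('Illustrate the usage()/execute() layout')
  cmdline.add_argument('input', help='An input image (placeholder argument)')
  cmdline.add_argument('output', help='An output image (placeholder argument)')

def execute(): #pylint: disable=unused-variable
  # Library imports happen inside the function, as in the scripts in this diff
  from mrtrix3 import app, path, run #pylint: disable=no-name-in-module, import-outside-toplevel
  app.check_output_path(app.ARGS.output)
  run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.from_user(app.ARGS.output), force=app.FORCE_OVERWRITE)

# Hand control to the mrtrix3 package (as in the refactored commands in this diff)
import mrtrix3
mrtrix3.execute() #pylint: disable=no-member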
-# Make the corresponding MRtrix3 Python libraries available -import inspect, os, sys -lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib')) -if not os.path.isdir(lib_folder): - sys.stderr.write('Unable to locate MRtrix3 Python libraries') - sys.exit(1) -sys.path.insert(0, lib_folder) - -from mrtrix3 import algorithm, app, image, path, run - - -app.init('Robert E. Smith (robert.smith@florey.edu.au) and Thijs Dhollander (thijs.dhollander@gmail.com)', - 'Estimate response function(s) for spherical deconvolution') -app.cmdline.addDescription('dwi2response acts as a \'master\' script for performing various types of response function estimation; a range of different algorithms are available for completing this task. When using this script, the name of the algorithm to be used must appear as the first argument on the command-line after \'dwi2response\'. The subsequent compulsory arguments and options available depend on the particular algorithm being invoked.') -app.cmdline.addDescription('Each algorithm available also has its own help page, including necessary references; e.g. to see the help page of the \'fa\' algorithm, type \'dwi2response fa\'.') - -# General options -common_options = app.cmdline.add_argument_group('Options common to all dwi2response algorithms') -common_options.add_argument('-shells', help='The b-value shell(s) to use in response function estimation (single value for single-shell response, comma-separated list for multi-shell response)') -common_options.add_argument('-lmax', help='The maximum harmonic degree(s) of response function estimation (single value for single-shell response, comma-separated list for multi-shell response)') -common_options.add_argument('-mask', help='Provide an initial mask for response voxel selection') -common_options.add_argument('-voxels', help='Output an image showing the final voxel selection(s)') -common_options.add_argument('-grad', help='Pass the diffusion gradient table in MRtrix format') -common_options.add_argument('-fslgrad', nargs=2, metavar=('bvecs', 'bvals'), help='Pass the diffusion gradient table in FSL bvecs/bvals format') -app.cmdline.flagMutuallyExclusiveOptions( [ 'grad', 'fslgrad' ] ) - -# Import the command-line settings for all algorithms found in the relevant directory -algorithm.initialise() - - -app.parse() - - -# Find out which algorithm the user has requested -alg = algorithm.getModule(app.args.algorithm) - - -# Check for prior existence of output files, and grab any input files, used by the particular algorithm -if app.args.voxels: - app.checkOutputPath(app.args.voxels) -alg.checkOutputPaths() - - -# Sanitise some inputs, and get ready for data import -if app.args.lmax: - try: - lmax = [ int(x) for x in app.args.lmax.split(',') ] - if any([lmax_value%2 for lmax_value in lmax]): - app.error('Value of lmax must be even') - except: - app.error('Parameter lmax must be a number') - if alg.needsSingleShell() and not len(lmax) == 1: - app.error('Can only specify a single lmax value for single-shell algorithms') -shells_option = '' -if app.args.shells: - try: - shells_values = [ int(round(float(x))) for x in app.args.shells.split(',') ] - except: - app.error('-shells option should provide a comma-separated list of b-values') - if alg.needsSingleShell() and not len(shells_values) == 1: - app.error('Can only specify a single b-value shell for single-shell algorithms') - shells_option = ' -shells ' + app.args.shells -singleshell_option = '' -if 
alg.needsSingleShell(): - singleshell_option = ' -singleshell -no_bzero' - -grad_import_option = '' -if app.args.grad: - grad_import_option = ' -grad ' + path.fromUser(app.args.grad, True) -elif app.args.fslgrad: - grad_import_option = ' -fslgrad ' + path.fromUser(app.args.fslgrad[0], True) + ' ' + path.fromUser(app.args.fslgrad[1], True) -elif 'dw_scheme' not in image.Header(path.fromUser(app.args.input, False)).keyval(): - app.error('Script requires diffusion gradient table: either in image header, or using -grad / -fslgrad option') - -app.makeTempDir() - -# Get standard input data into the temporary directory -if alg.needsSingleShell() or shells_option: - run.command('mrconvert ' + path.fromUser(app.args.input, True) + ' - -strides 0,0,0,1' + grad_import_option + ' | dwiextract - ' + path.toTemp('dwi.mif', True) + shells_option + singleshell_option) -else: # Don't discard b=0 in multi-shell algorithms - run.command('mrconvert ' + path.fromUser(app.args.input, True) + ' ' + path.toTemp('dwi.mif', True) + ' -strides 0,0,0,1' + grad_import_option) -if app.args.mask: - run.command('mrconvert ' + path.fromUser(app.args.mask, True) + ' ' + path.toTemp('mask.mif', True) + ' -datatype bit') - -alg.getInputs() - -app.gotoTempDir() - - -if app.args.mask: - # Check that the brain mask is appropriate - if image.Header('mask.mif').size()[:3] != image.Header('dwi.mif').size()[:3]: - app.error('Dimensions of provided mask image do not match DWI') -else: - run.command('dwi2mask dwi.mif mask.mif') - -if int(image.statistic('mask.mif', 'count', '-mask mask.mif')) == 0: - app.error(('Provided' if app.args.mask else 'Generated') + ' mask image does not contain any voxels') - - -# From here, the script splits depending on what estimation algorithm is being used -alg.execute() - - -# Finalize for all algorithms -if app.args.voxels: - run.command('mrconvert voxels.mif ' + path.fromUser(app.args.voxels, True) + (' -force' if app.forceOverwrite else '')) -app.complete() + + +def usage(cmdline): #pylint: disable=unused-variable + from mrtrix3 import algorithm, app #pylint: disable=no-name-in-module, import-outside-toplevel + + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Thijs Dhollander (thijs.dhollander@gmail.com)') + cmdline.set_synopsis('Estimate response function(s) for spherical deconvolution') + cmdline.add_description('dwi2response offers different algorithms for performing various types of response function estimation. The name of the algorithm must appear as the first argument on the command-line after \'dwi2response\'. The subsequent arguments and options depend on the particular algorithm being invoked.') + cmdline.add_description('Each algorithm available has its own help page, including necessary references; e.g. 
to see the help page of the \'fa\' algorithm, type \'dwi2response fa\'.') + + # General options + common_options = cmdline.add_argument_group('General dwi2response options') + common_options.add_argument('-mask', help='Provide an initial mask for response voxel selection') + common_options.add_argument('-voxels', help='Output an image showing the final voxel selection(s)') + common_options.add_argument('-shells', help='The b-value(s) to use in response function estimation (comma-separated list in case of multiple b-values, b=0 must be included explicitly)') + common_options.add_argument('-lmax', help='The maximum harmonic degree(s) for response function estimation (comma-separated list in case of multiple b-values)') + app.add_dwgrad_import_options(cmdline) + + # Import the command-line settings for all algorithms found in the relevant directory + algorithm.usage(cmdline) + + + + + + +def execute(): #pylint: disable=unused-variable + from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel + from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel + + # Find out which algorithm the user has requested + alg = algorithm.get_module(app.ARGS.algorithm) + + # Check for prior existence of output files, and grab any input files, used by the particular algorithm + if app.ARGS.voxels: + app.check_output_path(app.ARGS.voxels) + alg.check_output_paths() + + # Sanitise some inputs, and get ready for data import + if app.ARGS.lmax: + try: + lmax = [ int(x) for x in app.ARGS.lmax.split(',') ] + if any([lmax_value%2 for lmax_value in lmax]): + raise MRtrixError('Value of lmax must be even') + except: + raise MRtrixError('Parameter lmax must be a number') + if alg.needs_single_shell() and not len(lmax) == 1: + raise MRtrixError('Can only specify a single lmax value for single-shell algorithms') + shells_option = '' + if app.ARGS.shells: + try: + shells_values = [ int(round(float(x))) for x in app.ARGS.shells.split(',') ] + except: + raise MRtrixError('-shells option should provide a comma-separated list of b-values') + if alg.needs_single_shell() and not len(shells_values) == 1: + raise MRtrixError('Can only specify a single b-value shell for single-shell algorithms') + shells_option = ' -shells ' + app.ARGS.shells + singleshell_option = '' + if alg.needs_single_shell(): + singleshell_option = ' -singleshell -no_bzero' + + grad_import_option = app.read_dwgrad_import_options() + if not grad_import_option and 'dw_scheme' not in image.Header(path.from_user(app.ARGS.input, False)).keyval(): + raise MRtrixError('Script requires diffusion gradient table: either in image header, or using -grad / -fslgrad option') + + app.make_scratch_dir() + + # Get standard input data into the scratch directory + if alg.needs_single_shell() or shells_option: + app.console('Importing DWI data (' + path.from_user(app.ARGS.input) + ') and selecting b-values...') + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' - -strides 0,0,0,1' + grad_import_option + ' | dwiextract - ' + path.to_scratch('dwi.mif') + shells_option + singleshell_option, show=False) + else: # Don't discard b=0 in multi-shell algorithms + app.console('Importing DWI data (' + path.from_user(app.ARGS.input) + ')...') + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('dwi.mif') + ' -strides 0,0,0,1' + grad_import_option, show=False) + if app.ARGS.mask: + app.console('Importing mask (' + path.from_user(app.ARGS.mask) + ')...') + 
run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit', show=False) + + alg.get_inputs() + + app.goto_scratch_dir() + + if app.ARGS.mask: + # Check that the brain mask is appropriate + mask_header = image.Header('mask.mif') + if mask_header.size()[:3] != image.Header('dwi.mif').size()[:3]: + raise MRtrixError('Dimensions of provided mask image do not match DWI') + if not (len(mask_header.size()) == 3 or (len(mask_header.size()) == 4 and mask_header.size()[3] == 1)): + raise MRtrixError('Provided mask image needs to be a 3D image') + else: + app.console('Computing brain mask (dwi2mask)...') + run.command('dwi2mask dwi.mif mask.mif', show=False) + + if not image.statistics('mask.mif', mask='mask.mif').count: + raise MRtrixError(('Provided' if app.ARGS.mask else 'Generated') + ' mask image does not contain any voxels') + + # From here, the script splits depending on what estimation algorithm is being used + alg.execute() + + + + + + +# Execute the script +import mrtrix3 +mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/dwibiascorrect b/bin/dwibiascorrect index b8ca354ffe..90ce634d2e 100755 --- a/bin/dwibiascorrect +++ b/bin/dwibiascorrect @@ -1,146 +1,83 @@ #!/usr/bin/env python +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + # Script that performs B1 field inhomogeneity correction for a DWI volume series -# Bias field is estimated using the mean b=0 image, and subsequently used to correct all volumes - -# Make the corresponding MRtrix3 Python libraries available -import inspect, os, sys -lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib')) -if not os.path.isdir(lib_folder): - sys.stderr.write('Unable to locate MRtrix3 Python libraries') - sys.exit(1) -sys.path.insert(0, lib_folder) - -from distutils.spawn import find_executable -from mrtrix3 import app, fsl, image, path, run - -opt_N4BiasFieldCorrection = { - 's': ('4','shrink-factor applied to spatial dimensions'), - 'b':('[100,3]','[initial mesh resolution in mm, spline order] This value is optimised for human adult data and needs to be adjusted for rodent data.'), - 'c':('[1000,0.0]', '[numberOfIterations,convergenceThreshold]')} - -app.init('Robert E. Smith (robert.smith@florey.edu.au)', - 'Perform B1 field inhomogeneity correction for a DWI volume series') -app.cmdline.addCitation('If using -fast option', 'Zhang, Y.; Brady, M. & Smith, S. Segmentation of brain MR images through a hidden Markov random field model and the expectation-maximization algorithm. IEEE Transactions on Medical Imaging, 2001, 20, 45-57', True) -app.cmdline.addCitation('If using -fast option', 'Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. 
K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', True) -app.cmdline.addCitation('If using -ants option', 'Tustison, N.; Avants, B.; Cook, P.; Zheng, Y.; Egan, A.; Yushkevich, P. & Gee, J. N4ITK: Improved N3 Bias Correction. IEEE Transactions on Medical Imaging, 2010, 29, 1310-1320', True) -antsoptions = app.cmdline.add_argument_group('Options for ANTS N4BiasFieldCorrection') -for key in sorted(opt_N4BiasFieldCorrection): - antsoptions.add_argument('-ants.'+key, metavar=opt_N4BiasFieldCorrection[key][0], help='N4BiasFieldCorrection option -%s. %s' % (key,opt_N4BiasFieldCorrection[key][1])) - app.cmdline.flagMutuallyExclusiveOptions( [ '-ants.'+key, 'fsl' ] ) -app.cmdline.add_argument('input', help='The input image series to be corrected') -app.cmdline.add_argument('output', help='The output corrected image series') -options = app.cmdline.add_argument_group('Options for the dwibiascorrect script') -options.add_argument('-mask', help='Manually provide a mask image for bias field estimation') -options.add_argument('-bias', help='Output the estimated bias field') -options.add_argument('-ants', action='store_true', help='Use ANTS N4 to estimate the inhomogeneity field') -options.add_argument('-fsl', action='store_true', help='Use FSL FAST to estimate the inhomogeneity field') -app.cmdline.flagMutuallyExclusiveOptions( [ 'ants', 'fsl' ] ) -options.add_argument('-grad', help='Pass the diffusion gradient table in MRtrix format') -options.add_argument('-fslgrad', nargs=2, metavar=('bvecs', 'bvals'), help='Pass the diffusion gradient table in FSL bvecs/bvals format') -app.cmdline.flagMutuallyExclusiveOptions( [ 'grad', 'fslgrad' ] ) -app.parse() - -if app.args.fsl: - - if app.isWindows(): - app.error('Script cannot run using FSL on Windows due to FSL dependency') - - fsl_path = os.environ.get('FSLDIR', '') - if not fsl_path: - app.error('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') - - fast_cmd = fsl.exeName('fast') - - app.warn('Use of -fsl option in dwibiascorrect script is discouraged due to its strong dependence ' + \ - 'on initial brain masking, and its inability to correct voxels outside of this mask.' 
+ \ - 'Use of the -ants option is recommended for quantitative DWI analyses.') - -elif app.args.ants: - - if not find_executable('N4BiasFieldCorrection'): - app.error('Could not find ANTS program N4BiasFieldCorrection; please check installation') - - for key in sorted(opt_N4BiasFieldCorrection): - if hasattr(app.args, 'ants.'+key): - val = getattr(app.args, 'ants.'+key) - if val is not None: - opt_N4BiasFieldCorrection[key] = (val, 'user defined') - ants_options = ' '.join(['-%s %s' %(k, v[0]) for k, v in opt_N4BiasFieldCorrection.items()]) -else: - app.error('No bias field estimation algorithm specified') - -grad_import_option = '' -if app.args.grad: - grad_import_option = ' -grad ' + path.fromUser(app.args.grad, True) -elif app.args.fslgrad: - grad_import_option = ' -fslgrad ' + path.fromUser(app.args.fslgrad[0], True) + ' ' + path.fromUser(app.args.fslgrad[1], True) - -app.checkOutputPath(app.args.output) -app.checkOutputPath(app.args.bias) - -app.makeTempDir() - -run.command('mrconvert ' + path.fromUser(app.args.input, True) + ' ' + path.toTemp('in.mif', True) + grad_import_option) -if app.args.mask: - run.command('mrconvert ' + path.fromUser(app.args.mask, True) + ' ' + path.toTemp('mask.mif', True)) - -app.gotoTempDir() - -# Make sure it's actually a DWI that's been passed -dwi_header = image.Header('in.mif') -if len(dwi_header.size()) != 4: - app.error('Input image must be a 4D image') -if 'dw_scheme' not in dwi_header.keyval(): - app.error('No valid DW gradient scheme provided or present in image header') -if len(dwi_header.keyval()['dw_scheme']) != dwi_header.size()[3]: - app.error('DW gradient scheme contains different number of entries (' + str(len(dwi_header.keyval()['dw_scheme'])) + ' to number of volumes in DWIs (' + dwi_header.size()[3] + ')') - -# Generate a brain mask if required, or check the mask if provided -if app.args.mask: - if image.Header('mask.mif').size()[:3] != dwi_header.size()[:3]: - app.error('Provided mask image does not match input DWI') -else: - run.command('dwi2mask in.mif mask.mif') - -# Generate a mean b=0 image -run.command('dwiextract in.mif - -bzero | mrmath - mean mean_bzero.mif -axis 3') - -if app.args.fsl: - - # FAST doesn't accept a mask input; therefore need to explicitly mask the input image - run.command('mrcalc mean_bzero.mif mask.mif -mult - | mrconvert - mean_bzero_masked.nii -strides -1,+2,+3') - run.command(fast_cmd + ' -t 2 -o fast -n 3 -b mean_bzero_masked.nii') - bias_path = fsl.findImage('fast_bias') - -elif app.args.ants: - - # If the mask image was provided manually, and doesn't match the input image perfectly - # (i.e. 
also transform and voxel sizes), N4 will fail - if app.args.mask: - if not image.match('mean_bzero.mif', 'mask.mif'): - app.error('Input mask header does not perfectly match DWI as required by N4') - - # Use the brain mask as a weights image rather than a mask; means that voxels at the edge of the mask - # will have a smoothly-varying bias field correction applied, rather than multiplying by 1.0 outside the mask - run.command('mrconvert mean_bzero.mif mean_bzero.nii -strides +1,+2,+3') - run.command('mrconvert mask.mif mask.nii -strides +1,+2,+3') - init_bias_path = 'init_bias.nii' - corrected_path = 'corrected.nii' - run.command('N4BiasFieldCorrection -d 3 -i mean_bzero.nii -w mask.nii -o [' + corrected_path + ',' + init_bias_path + '] ' + ants_options) - - # N4 can introduce large differences between subjects via a global scaling of the bias field - # Estimate this scaling based on the total integral of the pre- and post-correction images within the brain mask - input_integral = float(run.command('mrcalc mean_bzero.mif mask.mif -mult - | mrmath - sum - -axis 0 | mrmath - sum - -axis 1 | mrmath - sum - -axis 2 | mrdump -')[0]) - output_integral = float(run.command('mrcalc ' + corrected_path + ' mask.mif -mult - | mrmath - sum - -axis 0 | mrmath - sum - -axis 1 | mrmath - sum - -axis 2 | mrdump -')[0]) - app.var(input_integral, output_integral) - bias_path = 'bias.mif' - run.command('mrcalc ' + init_bias_path + ' ' + str(output_integral / input_integral) + ' -mult ' + bias_path) - - - -run.command('mrcalc in.mif ' + bias_path + ' -div result.mif') -run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.forceOverwrite else '')) -if app.args.bias: - run.command('mrconvert ' + bias_path + ' ' + path.fromUser(app.args.bias, True) + (' -force' if app.forceOverwrite else '')) -app.complete() +# Bias field is typically estimated using the mean b=0 image, and subsequently used to correct all volumes + + +def usage(cmdline): #pylint: disable=unused-variable + from mrtrix3 import algorithm, app #pylint: disable=no-name-in-module, import-outside-toplevel + cmdline.set_author('Robert E. 
Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Perform B1 field inhomogeneity correction for a DWI volume series') + common_options = cmdline.add_argument_group('Options common to all dwibiascorrect algorithms') + common_options.add_argument('-mask', metavar='image', help='Manually provide a mask image for bias field estimation') + common_options.add_argument('-bias', metavar='image', help='Output the estimated bias field') + app.add_dwgrad_import_options(cmdline) + + # Import the command-line settings for all algorithms found in the relevant directory + algorithm.usage(cmdline) + + + +def execute(): #pylint: disable=unused-variable + from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel + from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel + + # Find out which algorithm the user has requested + alg = algorithm.get_module(app.ARGS.algorithm) + + app.check_output_path(app.ARGS.output) + app.check_output_path(app.ARGS.bias) + alg.check_output_paths() + + app.make_scratch_dir() + + grad_import_option = app.read_dwgrad_import_options() + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('in.mif') + grad_import_option) + if app.ARGS.mask: + run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') + + alg.get_inputs() + + app.goto_scratch_dir() + + # Make sure it's actually a DWI that's been passed + dwi_header = image.Header('in.mif') + if len(dwi_header.size()) != 4: + raise MRtrixError('Input image must be a 4D image') + if 'dw_scheme' not in dwi_header.keyval(): + raise MRtrixError('No valid DW gradient scheme provided or present in image header') + if len(dwi_header.keyval()['dw_scheme']) != dwi_header.size()[3]: + raise MRtrixError('DW gradient scheme contains different number of entries (' + str(len(dwi_header.keyval()['dw_scheme'])) + ' to number of volumes in DWIs (' + dwi_header.size()[3] + ')') + + # Generate a brain mask if required, or check the mask if provided by the user + if app.ARGS.mask: + if not image.match('in.mif', 'mask.mif', up_to_dim=3): + raise MRtrixError('Provided mask image does not match input DWI') + else: + run.command('dwi2mask in.mif mask.mif') + + # From here, the script splits depending on what estimation algorithm is being used + alg.execute() + + + + + +# Execute the script +import mrtrix3 +mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/dwicat b/bin/dwicat new file mode 100755 index 0000000000..20b93d9da3 --- /dev/null +++ b/bin/dwicat @@ -0,0 +1,141 @@ +#!/usr/bin/env python + +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + + + +import json + + + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Lena Dorfschmidt (ld548@cam.ac.uk) and Jakub Vohryzek (jakub.vohryzek@queens.ox.ac.uk) and Robert E. 
Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Concatenating multiple DWI series accounting for differential intensity scaling') + cmdline.add_description('This script concatenates two or more 4D DWI series, accounting for the ' + 'fact that there may be differences in intensity scaling between those series. ' + 'This intensity scaling is corrected by determining scaling factors that will ' + 'make the overall image intensities in the b=0 volumes of each series approximately ' + 'equivalent.') + cmdline.add_argument('inputs', nargs='+', help='Multiple input diffusion MRI series') + cmdline.add_argument('output', help='The output image series (all DWIs concatenated)') + cmdline.add_argument('-mask', metavar='image', help='Provide a binary mask within which image intensities will be matched') + + + +def execute(): #pylint: disable=unused-variable + from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel + from mrtrix3 import app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel + + num_inputs = len(app.ARGS.inputs) + if num_inputs < 2: + raise MRtrixError('Script requires at least two input image series') + + # check input data + def check_header(header): + if len(header.size()) != 4: + raise MRtrixError('Image "' + header.name() + '" is not a 4D image series') + if not 'dw_scheme' in header.keyval(): + raise MRtrixError('Image "' + header.name() + '" does not contain a gradient table') + num_grad_lines = len(header.keyval()['dw_scheme']) + if num_grad_lines != header.size()[3]: + raise MRtrixError('Number of lines in gradient table for image "' + header.name() + '" (' + str(num_grad_lines) + ') does not match number of volumes (' + str(header.size()[3]) + ')') + + first_header = image.Header(path.from_user(app.ARGS.inputs[0], False)) + check_header(first_header) + warn_protocol_mismatch = False + for filename in app.ARGS.inputs[1:]: + this_header = image.Header(path.from_user(filename, False)) + check_header(this_header) + if this_header.size()[0:3] != first_header.size()[0:3]: + raise MRtrixError('Spatial dimensions of image "' + filename + '" do not match those of first image "' + first_header.name() + '"') + for field_name in [ 'EchoTime', 'RepetitionTime', 'FlipAngle' ]: + first_value = first_header.keyval().get(field_name) + this_value = this_header.keyval().get(field_name) + if first_value and this_value and first_value != this_value: + warn_protocol_mismatch = True + if warn_protocol_mismatch: + app.warn('Mismatched protocol acquisition parameters detected between input images; ' + \ + 'the assumption of equivalent intensities between b=0 volumes of different inputs underlying operation of this script may not be valid') + if app.ARGS.mask: + mask_header = image.Header(path.from_user(app.ARGS.mask, False)) + if mask_header.size()[0:3] != first_header.size()[0:3]: + raise MRtrixError('Spatial dimensions of mask image "' + app.ARGS.mask + '" do not match those of first image "' + first_header.name() + '"') + + # check output path + app.check_output_path(path.from_user(app.ARGS.output, False)) + + # import data to scratch directory + app.make_scratch_dir() + for index, filename in enumerate(app.ARGS.inputs): + run.command('mrconvert ' + path.from_user(filename) + ' ' + path.to_scratch(str(index) + 'in.mif')) + if app.ARGS.mask: + run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') + app.goto_scratch_dir() + + # extract b=0 volumes within each input 
series + for index in range(0, num_inputs): + run.command('dwiextract ' + str(index) + 'in.mif ' + str(index) + 'b0.mif -bzero') + + mask_option = ' -mask_input mask.mif -mask_target mask.mif' if app.ARGS.mask else '' + + # for all but the first image series: + # - find multiplicative factor to match b=0 images to those of the first image + # - apply multiplicative factor to whole image series + # It would be better to not preferentially treat one of the inputs differently to any other: + # - compare all inputs to all other inputs + # - determine one single appropriate scaling factor for each image based on all results + # can't do a straight geometric average: e.g. if run for 2 images, each would map to + # the the input intensoty of the other image, and so the two corrected images would not match + # should be some mathematical theorem providing the optimal scaling factor for each image + # based on the resulting matrix of optimal scaling factors + filelist = [ '0in.mif' ] + for index in range(1, num_inputs): + stderr_text = run.command('mrhistmatch scale ' + str(index) + 'b0.mif 0b0.mif ' + str(index) + 'rescaledb0.mif' + mask_option).stderr + scaling_factor = None + for line in stderr_text.splitlines(): + if 'Estimated scale factor is' in line: + try: + scaling_factor = float(line.split()[-1]) + except ValueError: + raise MRtrixError('Unable to convert scaling factor from mrhistmatch output to floating-point number') + break + if scaling_factor is None: + raise MRtrixError('Unable to extract scaling factor from mrhistmatch output') + filename = str(index) + 'rescaled.mif' + run.command('mrcalc ' + str(index) + 'in.mif ' + str(scaling_factor) + ' -mult ' + filename) + filelist.append(filename) + + # concatenate all series together + run.command('mrcat ' + ' '.join(filelist) + ' - -axis 3 | ' + \ + 'mrconvert - result.mif -json_export result_init.json -strides 0,0,0,1') + + # remove current contents of command_history, since there's no sensible + # way to choose from which input image the contents should be taken; + # we do however want to keep other contents of keyval (e.g. gradient table) + with open('result_init.json', 'r') as input_json_file: + keyval = json.load(input_json_file) + keyval.pop('command_history', None) + with open('result_final.json', 'w') as output_json_file: + json.dump(keyval, output_json_file) + + run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval='result_final.json', force=app.FORCE_OVERWRITE) + + + + +# Execute the script +import mrtrix3 +mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/dwifslpreproc b/bin/dwifslpreproc new file mode 100755 index 0000000000..53bfb0afe1 --- /dev/null +++ b/bin/dwifslpreproc @@ -0,0 +1,1211 @@ +#!/usr/bin/env python + +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
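# --------------------------------------------------------------------------
# Illustrative sketch only (not part of the patch): the dwicat comments above
# explain why each later input series is intensity-matched to the FIRST series
# rather than series being matched to one another. A tiny numeric example with
# made-up mean b=0 intensities, approximating the mrhistmatch-estimated factor
# by a simple ratio of means, shows the reasoning.

first_b0_mean, second_b0_mean = 100.0, 200.0   # hypothetical series intensities

# Rescaling each of two series to match the *other* merely swaps their
# intensities, so the "corrected" series still disagree:
swapped = (first_b0_mean * (second_b0_mean / first_b0_mean),    # -> 200.0
           second_b0_mean * (first_b0_mean / second_b0_mean))   # -> 100.0

# dwicat instead leaves the first series untouched and scales every subsequent
# series towards it (the actual factor comes from "mrhistmatch scale" in the
# loop above, not from a ratio of means):
scale_for_second = first_b0_mean / second_b0_mean                # -> 0.5
matched = (first_b0_mean, second_b0_mean * scale_for_second)     # -> (100.0, 100.0)

print(swapped, matched)
# --------------------------------------------------------------------------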
+ +# Script for performing DWI pre-processing using FSL 5.0 (onwards) tools eddy / topup / applytopup + + + +import itertools, json, math, os, shutil, sys +from distutils.spawn import find_executable + + + +def usage(cmdline): #pylint: disable=unused-variable + from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Perform diffusion image pre-processing using FSL\'s eddy tool; including inhomogeneity distortion correction using FSL\'s topup tool if possible') + cmdline.add_description('This script is intended to provide convenience of use of the FSL software tools topup and eddy for performing DWI pre-processing, by encapsulating some of the surrounding image data and metadata processing steps. It is intended to simply these processing steps for most commonly-used DWI acquisition strategies, whilst also providing support for some more exotic acquisitions. The "example usage" section demonstrates the ways in which the script can be used based on the (compulsory) -rpe_* command-line options.') + cmdline.add_description('The "-topup_options" and "-eddy_options" command-line options allow the user to pass desired command-line options directly to the FSL commands topup and eddy. The available options for those commands may vary between versions of FSL; users can interrogate such by querying the help pages of the installed software, and/or the FSL online documentation: (topup) https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/TopupUsersGuide ; (eddy) https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/UsersGuide') + cmdline.add_description('The script will attempt to run the CUDA version of eddy; if this does not succeed for any reason, or is not present on the system, the CPU version will be attempted instead. By default, the CUDA eddy binary found that indicates compilation against the most recent version of CUDA will be attempted; this can be over-ridden by providing a soft-link "eddy_cuda" within your path that links to the binary you wish to be executed.') + cmdline.add_description('Note that this script does not perform any explicit registration between images provided to topup via the -se_epi option, and the DWI volumes provided to eddy. In some instances (motion between acquisitions) this can result in erroneous application of the inhomogeneity field during distortion correction. Use of the -align_seepi option is advocated in this scenario, which ensures that the first volume in the series provided to eddy is also the first volume in the series provided to eddy, guaranteeing alignment. 
But a prerequisite for this approach is that the image contrast within the images provided to the -se_epi option must match the b=0 volumes present within the input DWI series: this means equivalent TE, TR and flip angle (note that differences in multi-band factors between two acquisitions may lead to differences in TR).') + cmdline.add_example_usage('A basic DWI acquisition, where all image volumes are acquired in a single protocol with fixed phase encoding', + 'dwifslpreproc DWI_in.mif DWI_out.mif -rpe_none -pe_dir ap -readout_time 0.55', + 'Due to use of a single fixed phase encoding, no EPI distortion correction can be applied in this case.') + cmdline.add_example_usage('DWIs all acquired with a single fixed phase encoding; but additionally a pair of b=0 images with reversed phase encoding to estimate the inhomogeneity field', + 'mrcat b0_ap.mif b0_pa.mif b0_pair.mif -axis 3; dwifslpreproc DWI_in.mif DWI_out.mif -rpe_pair -se_epi b0_pair.mif -pe_dir ap -readout_time 0.72 -align_seepi', + 'Here the two individual b=0 volumes are concatenated into a single 4D image series, and this is provided to the script via the -se_epi option. Note that with the -rpe_pair option used here, which indicates that the SE-EPI image series contains one or more pairs of b=0 images with reversed phase encoding, the FIRST HALF of the volumes in the SE-EPI series must possess the same phase encoding as the input DWI series, while the second half are assumed to contain the opposite phase encoding direction but identical total readout time. Use of the -align_seepi option is advocated as long as its use is valid (more information in the Description section).') + cmdline.add_example_usage('All DWI directions & b-values are acquired twice, with the phase encoding direction of the second acquisition protocol being reversed with respect to the first', + 'mrcat DWI_lr.mif DWI_rl.mif DWI_all.mif -axis 3; dwifslpreproc DWI_all.mif DWI_out.mif -rpe_all -pe_dir lr -readout_time 0.66', + 'Here the two acquisition protocols are concatenated into a single DWI series containing all acquired volumes. The direction indicated via the -pe_dir option should be the direction of phase encoding used in acquisition of the FIRST HALF of volumes in the input DWI series; ie. the first of the two files that was provided to the mrcat command. In this usage scenario, the output DWI series will contain the same number of image volumes as ONE of the acquired DWI series (ie. half of the number in the concatenated series); this is because the script will identify pairs of volumes that possess the same diffusion sensitisation but reversed phase encoding, and perform explicit recombination of those volume pairs in such a way that image contrast in regions of inhomogeneity is determined from the stretched rather than the compressed image.') + cmdline.add_example_usage('Any acquisition scheme that does not fall into one of the example usages above', + 'mrcat DWI_*.mif DWI_all.mif -axis 3; mrcat b0_*.mif b0_all.mif -axis 3; dwifslpreproc DWI_all.mif DWI_out.mif -rpe_header -se_epi b0_all.mif -align_seepi', + 'With this usage, the relevant phase encoding information is determined entirely based on the contents of the relevant image headers, and dwifslpreproc prepares all metadata for the executed FSL commands accordingly. This can therefore be used if the particular DWI acquisition strategy used does not correspond to one of the simple examples as described in the prior examples. 
This usage is predicated on the headers of the input files containing appropriately-named key-value fields such that MRtrix3 tools identify them as such. In some cases, conversion from DICOM using MRtrix3 commands will automatically extract and embed this information; however this is not true for all scanner vendors and/or software versions. In the latter case it may be possible to manually provide these metadata; either using the -json_import command-line option of dwifslpreproc, or the -json_import or one of the -import_pe_* command-line options of MRtrix3\'s mrconvert command (and saving in .mif format) prior to running dwifslpreproc.') + cmdline.add_citation('Andersson, J. L. & Sotiropoulos, S. N. An integrated approach to correction for off-resonance effects and subject movement in diffusion MR imaging. NeuroImage, 2015, 125, 1063-1078', is_external=True) + cmdline.add_citation('Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', is_external=True) + cmdline.add_citation('Skare, S. & Bammer, R. Jacobian weighting of distortion corrected EPI data. Proceedings of the International Society for Magnetic Resonance in Medicine, 2010, 5063', condition='If performing recombination of diffusion-weighted volume pairs with opposing phase encoding directions', is_external=True) + cmdline.add_citation('Andersson, J. L.; Skare, S. & Ashburner, J. How to correct susceptibility distortions in spin-echo echo-planar images: application to diffusion tensor imaging. NeuroImage, 2003, 20, 870-888', condition='If performing EPI susceptibility distortion correction', is_external=True) + cmdline.add_citation('Andersson, J. L. R.; Graham, M. S.; Zsoldos, E. & Sotiropoulos, S. N. Incorporating outlier detection and replacement into a non-parametric framework for movement and distortion correction of diffusion MR images. NeuroImage, 2016, 141, 556-572', condition='If including "--repol" in -eddy_options input', is_external=True) + cmdline.add_citation('Andersson, J. L. R.; Graham, M. S.; Drobnjak, I.; Zhang, H.; Filippini, N. & Bastiani, M. Towards a comprehensive framework for movement and distortion correction of diffusion MR images: Within volume movement. NeuroImage, 2017, 152, 450-466', condition='If including "--mporder" in -eddy_options input', is_external=True) + cmdline.add_citation('Bastiani, M.; Cottaar, M.; Fitzgibbon, S.P.; Suri, S.; Alfaro-Almagro, F.; Sotiropoulos, S.N.; Jbabdi, S.; Andersson, J.L.R. Automated quality control for within and between studies diffusion MRI data using a non-parametric framework for movement and distortion correction. NeuroImage, 2019, 184, 801-812', condition='If using -eddyqc_test or -eddyqc_all option and eddy_quad is installed', is_external=True) + cmdline.add_argument('input', help='The input DWI series to be corrected') + cmdline.add_argument('output', help='The output corrected image series') + cmdline.add_argument('-pe_dir', metavar=('PE'), help='Manually specify the phase encoding direction of the input series; can be a signed axis number (e.g. -0, 1, +2), an axis designator (e.g. RL, PA, IS), or NIfTI axis codes (e.g. 
i-, j, k)') + cmdline.add_argument('-readout_time', metavar=('time'), type=float, help='Manually specify the total readout time of the input series (in seconds)') + cmdline.add_argument('-se_epi', metavar=('image'), help='Provide an additional image series consisting of spin-echo EPI images, which is to be used exclusively by topup for estimating the inhomogeneity field (i.e. it will not form part of the output image series)') + cmdline.add_argument('-align_seepi', action='store_true', help='Achieve alignment between the SE-EPI images used for inhomogeneity field estimation, and the DWIs (more information in Description section)') + cmdline.add_argument('-json_import', metavar=('file'), help='Import image header information from an associated JSON file (may be necessary to determine phase encoding information)') + cmdline.add_argument('-topup_options', metavar=('" TopupOptions"'), help='Manually provide additional command-line options to the topup command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to topup)') + cmdline.add_argument('-eddy_options', metavar=('" EddyOptions"'), help='Manually provide additional command-line options to the eddy command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to eddy)') + cmdline.add_argument('-eddy_mask', metavar=('image'), help='Provide a processing mask to use for eddy, instead of having dwifslpreproc generate one internally using dwi2mask') + cmdline.add_argument('-eddy_slspec', metavar=('file'), help='Provide a file containing slice groupings for eddy\'s slice-to-volume registration') + cmdline.add_argument('-eddyqc_text', metavar=('directory'), help='Copy the various text-based statistical outputs generated by eddy, and the output of eddy_qc (if installed), into an output directory') + cmdline.add_argument('-eddyqc_all', metavar=('directory'), help='Copy ALL outputs generated by eddy (including images), and the output of eddy_qc (if installed), into an output directory') + app.add_dwgrad_export_options(cmdline) + app.add_dwgrad_import_options(cmdline) + rpe_options = cmdline.add_argument_group('Options for specifying the acquisition phase-encoding design; note that one of the -rpe_* options MUST be provided') + rpe_options.add_argument('-rpe_none', action='store_true', help='Specify that no reversed phase-encoding image data is being provided; eddy will perform eddy current and motion correction only') + rpe_options.add_argument('-rpe_pair', action='store_true', help='Specify that a set of images (typically b=0 volumes) will be provided for use in inhomogeneity field estimation only (using the -se_epi option)') + rpe_options.add_argument('-rpe_all', action='store_true', help='Specify that ALL DWIs have been acquired with opposing phase-encoding') + rpe_options.add_argument('-rpe_header', action='store_true', help='Specify that the phase-encoding information can be found in the image header(s), and that this is the information that the script should use') + cmdline.flag_mutually_exclusive_options( [ 'rpe_none', 'rpe_pair', 'rpe_all', 'rpe_header' ], True ) + cmdline.flag_mutually_exclusive_options( [ 'rpe_none', 'se_epi' ], False ) # May still technically provide -se_epi even with -rpe_all + cmdline.flag_mutually_exclusive_options( [ 'rpe_header', 'pe_dir' ], False ) # Can't manually provide phase-encoding direction if expecting it to be in the header + 
cmdline.flag_mutually_exclusive_options( [ 'rpe_header', 'readout_time' ], False ) # Can't manually provide readout time if expecting it to be in the header + cmdline.flag_mutually_exclusive_options( [ 'eddyqc_text', 'eddyqc_all' ], False ) + + + + + + +def execute(): #pylint: disable=unused-variable + from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel + from mrtrix3 import app, fsl, image, matrix, path, phaseencoding, run, utils #pylint: disable=no-name-in-module, import-outside-toplevel + + if utils.is_windows(): + raise MRtrixError('Script cannot run on Windows due to FSL dependency') + + image.check_3d_nonunity(path.from_user(app.ARGS.input, False)) + + pe_design = '' + if app.ARGS.rpe_none: + pe_design = 'None' + elif app.ARGS.rpe_pair: + pe_design = 'Pair' + if not app.ARGS.se_epi: + raise MRtrixError('If using the -rpe_pair option, the -se_epi option must be used to provide the spin-echo EPI data to be used by topup') + elif app.ARGS.rpe_all: + pe_design = 'All' + elif app.ARGS.rpe_header: + pe_design = 'Header' + else: + raise MRtrixError('Must explicitly specify phase-encoding acquisition design (even if none)') + + if app.ARGS.align_seepi and not app.ARGS.se_epi: + raise MRtrixError('-align_seepi option is only applicable when the -se_epi option is also used') + + fsl_path = os.environ.get('FSLDIR', '') + if not fsl_path: + raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') + + if not pe_design == 'None': + topup_config_path = os.path.join(fsl_path, 'etc', 'flirtsch', 'b02b0.cnf') + if not os.path.isfile(topup_config_path): + raise MRtrixError('Could not find necessary default config file for FSL topup command (expected location: ' + topup_config_path + ')') + topup_cmd = fsl.exe_name('topup') + applytopup_cmd = fsl.exe_name('applytopup') + + if not fsl.eddy_binary(True) and not fsl.eddy_binary(False): + raise MRtrixError('Could not find any version of FSL eddy command') + fsl_suffix = fsl.suffix() + app.check_output_path(app.ARGS.output) + + # Export the gradient table to the path requested by the user if necessary + grad_export_option = app.read_dwgrad_export_options() + + + eddyqc_path = None + eddyqc_files = [ 'eddy_parameters', 'eddy_movement_rms', 'eddy_restricted_movement_rms', \ + 'eddy_post_eddy_shell_alignment_parameters', 'eddy_post_eddy_shell_PE_translation_parameters', \ + 'eddy_outlier_report', 'eddy_outlier_map', 'eddy_outlier_n_stdev_map', 'eddy_outlier_n_sqr_stdev_map', \ + 'eddy_movement_over_time' ] + if app.ARGS.eddyqc_text: + eddyqc_path = path.from_user(app.ARGS.eddyqc_text, False) + elif app.ARGS.eddyqc_all: + eddyqc_path = path.from_user(app.ARGS.eddyqc_all, False) + eddyqc_files.extend([ 'eddy_outlier_free_data.nii.gz', 'eddy_cnr_maps.nii.gz', 'eddy_residuals.nii.gz' ]) + if eddyqc_path: + if os.path.exists(eddyqc_path): + if os.path.isdir(eddyqc_path): + if any([ os.path.exists(os.path.join(eddyqc_path, filename)) for filename in eddyqc_files ]): + if app.FORCE_OVERWRITE: + app.warn('Output eddy QC directory already contains relevant files; these will be overwritten on completion') + else: + raise MRtrixError('Output eddy QC directory already contains relevant files (use -force to override)') + else: + if app.FORCE_OVERWRITE: + app.warn('Target for eddy QC output is not a directory; it will be overwritten on completion') + else: + raise MRtrixError('Target for eddy QC output exists, and is not a directory (use -force to override)') + + + 
eddy_manual_options = [] + if app.ARGS.eddy_options: + # Initially process as a list; we'll convert back to a string later + eddy_manual_options = app.ARGS.eddy_options.strip().split() + # Check for erroneous usages before we perform any data importing + if any(entry.startswith('--mask=') for entry in eddy_manual_options): + raise MRtrixError('Cannot provide eddy processing mask via -eddy_options "--mask=..." as manipulations are required; use -eddy_mask option instead') + if any(entry.startswith('--slspec=') for entry in eddy_manual_options): + raise MRtrixError('Cannot provide eddy slice specification file via -eddy_options "--slspec=..." as manipulations are required; use -eddy_slspec option instead') + if '--resamp=lsr' in eddy_manual_options: + raise MRtrixError('dwifslpreproc does not currently support least-squares reconstruction; this cannot be simply passed via -eddy_options') + + + # Don't import slspec file directly; just make sure it exists + if app.ARGS.eddy_slspec and not os.path.isfile(path.from_user(app.ARGS.eddy_slspec, False)): + raise MRtrixError('Unable to find file \"' + app.ARGS.eddy_slspec + '\" provided via -eddy_slspec option') + + + # Convert all input images into MRtrix format and store in scratch directory first + app.make_scratch_dir() + + grad_import_option = app.read_dwgrad_import_options() + json_import_option = '' + if app.ARGS.json_import: + json_import_option = ' -json_import ' + path.from_user(app.ARGS.json_import) + json_export_option = ' -json_export ' + path.to_scratch('dwi.json', True) + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('dwi.mif') + grad_import_option + json_import_option + json_export_option) + if app.ARGS.se_epi: + image.check_3d_nonunity(path.from_user(app.ARGS.se_epi, False)) + run.command('mrconvert ' + path.from_user(app.ARGS.se_epi) + ' ' + path.to_scratch('se_epi.mif')) + if app.ARGS.eddy_mask: + run.command('mrconvert ' + path.from_user(app.ARGS.eddy_mask) + ' ' + path.to_scratch('eddy_mask.mif') + ' -datatype bit') + + app.goto_scratch_dir() + + + # Get information on the input images, and check their validity + dwi_header = image.Header('dwi.mif') + if not len(dwi_header.size()) == 4: + raise MRtrixError('Input DWI must be a 4D image') + dwi_num_volumes = dwi_header.size()[3] + app.debug('Number of DWI volumes: ' + str(dwi_num_volumes)) + dwi_num_slices = dwi_header.size()[2] + app.debug('Number of DWI slices: ' + str(dwi_num_slices)) + dwi_pe_scheme = phaseencoding.get_scheme(dwi_header) + if app.ARGS.se_epi: + se_epi_header = image.Header('se_epi.mif') + # This doesn't necessarily apply any more: May be able to combine e.g. 
a P>>A from -se_epi with an A>>P b=0 image from the DWIs + # if not len(se_epi_header.size()) == 4: + # raise MRtrixError('File provided using -se_epi option must contain more than one image volume') + se_epi_pe_scheme = phaseencoding.get_scheme(se_epi_header) + if 'dw_scheme' not in dwi_header.keyval(): + raise MRtrixError('No diffusion gradient table found') + grad = dwi_header.keyval()['dw_scheme'] + if not len(grad) == dwi_num_volumes: + raise MRtrixError('Number of lines in gradient table (' + str(len(grad)) + ') does not match input image (' + str(dwi_num_volumes) + ' volumes); check your input data') + + + # Deal with slice timing information for eddy slice-to-volume correction + eddy_mporder = any(s.startswith('--mporder') for s in eddy_manual_options) + if eddy_mporder: + if 'SliceEncodingDirection' in dwi_header.keyval(): + slice_encoding_direction = dwi_header.keyval()['SliceEncodingDirection'] + app.debug('Slice encoding direction: ' + slice_encoding_direction) + if not slice_encoding_direction.startswith('k'): + raise MRtrixError('DWI header indicates that 3rd spatial axis is not the slice axis; this is not yet compatible with --mporder option in eddy, nor supported in dwifslpreproc') + slice_encoding_direction = image.axis2dir(slice_encoding_direction) + else: + app.console('No slice encoding direction information present; assuming third axis corresponds to slices') + slice_encoding_direction = [0,0,1] + slice_groups = [ ] + slice_timing = [ ] + # Since there's a chance that we may need to pad this info, we can't just copy this file + # to the scratch directory... + if app.ARGS.eddy_slspec: + try: + slice_groups = matrix.load_numeric(path.from_user(app.ARGS.eddy_slspec, False), dtype=int) + app.debug('Slice groups: ' + str(slice_groups)) + except ValueError: + try: + slice_timing = matrix.load_numeric(path.from_user(app.ARGS.eddy_slspec, False), dtype=float) + app.debug('Slice timing: ' + str(slice_timing)) + app.warn('\"slspec\" file provided to FSL eddy is supposed to contain slice indices for slice groups; ' + 'contents of file \"' + app.ARGS.eddy_slspec + '\" appears to instead be slice timings; ' + 'these data have been imported and will be converted to the appropriate format') + if len(slice_timing) != dwi_num_slices: + raise MRtrixError('Cannot use slice timing information from file \"' + app.ARGS.eddy_slspec + '\" for slice-to-volume correction: ' + \ + 'number of entries (' + str(len(slice_timing)) + ') does not match number of slices (' + str(dwi_num_slices) + ')') + except ValueError: + raise MRtrixError('Error parsing eddy \"slspec\" file \"' + app.ARGS.eddy_slspec + '\" ' + '(please see FSL eddy help page, specifically the --slspec option)') + else: + if 'SliceTiming' not in dwi_header.keyval(): + raise MRtrixError('Cannot perform slice-to-volume correction in eddy: ' + '-eddy_slspec option not specified, and no slice timing information present in input DWI header') + slice_timing = dwi_header.keyval()['SliceTiming'] + app.debug('Initial slice timing contents from header: ' + str(slice_timing)) + if slice_timing in ['invalid', 'variable']: + raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' + 'data flagged as "' + slice_timing + '"') + # Fudges necessary to maniupulate nature of slice timing data in cases where + # bad JSON formatting has led to the data not being simply a list of floats + # (whether from MRtrix3 DICOM conversion or from anything else) + if isinstance(slice_timing, utils.STRING_TYPES): + 
slice_timing = slice_timing.split() + if not isinstance(slice_timing, list): + raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' + 'data is not a list') + if len(slice_timing) == 1: + slice_timing = slice_timing[0] + if not isinstance(slice_timing, list): + raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' + 'unexpected data format') + if isinstance(slice_timing[0], list): + if not all( [ len(entry) == 1 for entry in slice_timing ] ): + raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' + 'data do not appear to be 1D') + slice_timing = [ entry[0] for entry in slice_timing ] + if not all( [ isinstance(entry, float) for entry in slice_timing ] ): + try: + slice_timing = [ float(entry) for entry in slice_timing ] + except ValueError: + raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' + 'data are not numeric') + app.debug('Re-formatted slice timing contents from header: ' + str(slice_timing)) + if len(slice_timing) != dwi_num_slices: + raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' + 'number of entries (' + str(len(slice_timing)) + ') does not match number of slices (' + str(dwi_header.size()[2]) + ')') + elif app.ARGS.eddy_slspec: + app.warn('-eddy_slspec option provided, but "--mporder=" not provided via -eddy_options; ' + 'slice specification file not imported as it would not be utilised by eddy') + + + # Use new features of dirstat to query the quality of the diffusion acquisition scheme + # Need to know the mean b-value in each shell, and the asymmetry value of each shell + # But don't bother testing / warning the user if they're already controlling for this + if not app.ARGS.eddy_options or not any(s.startswith('--slm=') for s in app.ARGS.eddy_options.split()): + shell_bvalues = [ int(round(float(value))) for value in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] + shell_asymmetries = [ float(value) for value in run.command('dirstat dwi.mif -output asym').stdout.splitlines() ] + # dirstat will skip any b=0 shell by default; therefore for correspondence between + # shell_bvalues and shell_symmetry, need to remove any b=0 from the former + if len(shell_bvalues) == len(shell_asymmetries) + 1: + shell_bvalues = shell_bvalues[1:] + elif len(shell_bvalues) != len(shell_asymmetries): + raise MRtrixError('Number of b-values reported by mrinfo (' + str(len(shell_bvalues)) + ') does not match number of outputs provided by dirstat (' + str(len(shell_asymmetries)) + ')') + for bvalue, asymmetry in zip(shell_bvalues, shell_asymmetries): + if asymmetry >= 0.1: + app.warn('sampling of b=' + str(bvalue) + ' shell is ' + ('strongly' if asymmetry >= 0.4 else 'moderately') + \ + ' asymmetric; distortion correction may benefit from use of: ' + \ + '-eddy_options " ... --slm=linear ... 
"') + + + # Since we want to access user-defined phase encoding information regardless of whether or not + # such information is present in the header, let's grab it here + manual_pe_dir = None + if app.ARGS.pe_dir: + manual_pe_dir = [ float(i) for i in phaseencoding.direction(app.ARGS.pe_dir) ] + app.debug('Manual PE direction: ' + str(manual_pe_dir)) + manual_trt = None + if app.ARGS.readout_time: + manual_trt = float(app.ARGS.readout_time) + app.debug('Manual readout time: ' + str(manual_trt)) + + + # Utilise the b-value clustering algorithm in src/dwi/shells.* + shell_indices = [ [ int(i) for i in entry.split(',') ] for entry in image.mrinfo('dwi.mif', 'shell_indices').split(' ') ] + # For each volume index, store the index of the shell to which it is attributed + # (this will make it much faster to determine whether or not two volumes belong to the same shell) + vol2shell = [ -1 ] * dwi_num_volumes + for index, volumes in enumerate(shell_indices): + for volume in volumes: + vol2shell[volume] = index + assert all(index >= 0 for index in vol2shell) + + + def grads_match(one, two): + # Are the two volumes assigned to the same b-value shell? + if vol2shell[one] != vol2shell[two]: + return False + # Dot product between gradient directions + # First, need to check for zero-norm vectors: + # - If both are zero, skip this check + # - If one is zero and the other is not, volumes don't match + # - If neither is zero, test the dot product + if any(grad[one][0:3]): + if not any(grad[two][0:3]): + return False + dot_product = grad[one][0]*grad[two][0] + grad[one][1]*grad[two][1] + grad[one][2]*grad[two][2] + if abs(dot_product) < 0.999: + return False + elif any(grad[two][0:3]): + return False + return True + + + # Manually generate a phase-encoding table for the input DWI based on user input + dwi_manual_pe_scheme = None + se_epi_manual_pe_scheme = None + auto_trt = 0.1 + dwi_auto_trt_warning = False + if manual_pe_dir: + + if manual_trt: + trt = manual_trt + else: + trt = auto_trt + dwi_auto_trt_warning = True + + # Still construct the manual PE scheme even with 'None' or 'Pair': + # there may be information in the header that we need to compare against + if pe_design == 'None': + line = list(manual_pe_dir) + line.append(trt) + dwi_manual_pe_scheme = [ line ] * dwi_num_volumes + app.debug('Manual DWI PE scheme for \'None\' PE design: ' + str(dwi_manual_pe_scheme)) + + # With 'Pair', also need to construct the manual scheme for SE EPIs + elif pe_design == 'Pair': + line = list(manual_pe_dir) + line.append(trt) + dwi_manual_pe_scheme = [ line ] * dwi_num_volumes + app.debug('Manual DWI PE scheme for \'Pair\' PE design: ' + str(dwi_manual_pe_scheme)) + se_epi_num_volumes = se_epi_header.size()[3] + if se_epi_num_volumes%2: + raise MRtrixError('If using -rpe_pair option, image provided using -se_epi must contain an even number of volumes') + # Assume that first half of volumes have same direction as series; + # second half have the opposite direction + se_epi_manual_pe_scheme = [ line ] * int(se_epi_num_volumes/2) + line = [ (-i if i else 0.0) for i in manual_pe_dir ] + line.append(trt) + se_epi_manual_pe_scheme.extend( [ line ] * int(se_epi_num_volumes/2) ) + app.debug('Manual SEEPI PE scheme for \'Pair\' PE design: ' + str(se_epi_manual_pe_scheme)) + + # If -rpe_all, need to scan through grad and figure out the pairings + # This will be required if relying on user-specified phase encode direction + # It will also be required at the end of the script for the manual recombination + # Update: The 
possible permutations of volume-matched acquisition is limited within the + # context of the -rpe_all option. In particular, the potential for having more + # than one b=0 volume within each half means that it is not possible to permit + # arbitrary ordering of those pairs, since b=0 volumes would then be matched + # despite having the same phase-encoding direction. Instead, explicitly enforce + # that volumes must be matched between the first and second halves of the DWI data. + elif pe_design == 'All': + if dwi_num_volumes%2: + raise MRtrixError('If using -rpe_all option, input image must contain an even number of volumes') + grads_matched = [ dwi_num_volumes ] * dwi_num_volumes + grad_pairs = [ ] + app.debug('Commencing gradient direction matching; ' + str(dwi_num_volumes) + ' volumes') + for index1 in range(int(dwi_num_volumes/2)): + if grads_matched[index1] == dwi_num_volumes: # As yet unpaired + for index2 in range(int(dwi_num_volumes/2), dwi_num_volumes): + if grads_matched[index2] == dwi_num_volumes: # Also as yet unpaired + if grads_match(index1, index2): + grads_matched[index1] = index2 + grads_matched[index2] = index1 + grad_pairs.append([index1, index2]) + app.debug('Matched volume ' + str(index1) + ' with ' + str(index2) + ': ' + str(grad[index1]) + ' ' + str(grad[index2])) + break + else: + raise MRtrixError('Unable to determine matching reversed phase-encode direction volume for DWI volume ' + str(index1)) + if not len(grad_pairs) == dwi_num_volumes/2: + raise MRtrixError('Unable to determine complete matching DWI volume pairs for reversed phase-encode combination') + # Construct manual PE scheme here: + # Regardless of whether or not there's a scheme in the header, need to have it: + # if there's one in the header, want to compare to the manually-generated one + dwi_manual_pe_scheme = [ ] + for index in range(0, dwi_num_volumes): + line = list(manual_pe_dir) + if index >= int(dwi_num_volumes/2): + line = [ (-i if i else 0.0) for i in line ] + line.append(trt) + dwi_manual_pe_scheme.append(line) + app.debug('Manual DWI PE scheme for \'All\' PE design: ' + str(dwi_manual_pe_scheme)) + + else: # No manual phase encode direction defined + + if not pe_design == 'Header': + raise MRtrixError('If not using -rpe_header, phase encoding direction must be provided using the -pe_dir option') + + + + def scheme_dirs_match(one, two): + for line_one, line_two in zip(one, two): + if not line_one[0:3] == line_two[0:3]: + return False + return True + + def scheme_times_match(one, two): + for line_one, line_two in zip(one, two): + if abs(line_one[3] - line_two[3]) > 5e-3: + return False + return True + + + + # Determine whether or not the phase encoding table generated manually should be used + # (possibly instead of a table present in the image header) + overwrite_dwi_pe_scheme = False + if dwi_pe_scheme: + if manual_pe_dir: + # Compare manual specification to that read from the header; + # overwrite & give warning to user if they differ + # Bear in mind that this could even be the case for -rpe_all; + # relying on earlier code having successfully generated the 'appropriate' + # PE scheme for the input volume based on the diffusion gradient table + if not scheme_dirs_match(dwi_pe_scheme, dwi_manual_pe_scheme): + app.warn('User-defined phase-encoding direction design does not match what is stored in DWI image header; proceeding with user specification') + overwrite_dwi_pe_scheme = True + if manual_trt: + # Compare manual specification to that read from the header + if not 
scheme_times_match(dwi_pe_scheme, dwi_manual_pe_scheme): + app.warn('User-defined total readout time does not match what is stored in DWI image header; proceeding with user specification') + overwrite_dwi_pe_scheme = True + if overwrite_dwi_pe_scheme: + dwi_pe_scheme = dwi_manual_pe_scheme # May be used later for triggering volume recombination + else: + dwi_manual_pe_scheme = None # To guarantee that these generated data are never used + else: + # Nothing in the header; rely entirely on user specification + if pe_design == 'Header': + raise MRtrixError('No phase encoding information found in DWI image header') + if not manual_pe_dir: + raise MRtrixError('No phase encoding information provided either in header or at command-line') + if dwi_auto_trt_warning: + app.console('Total readout time not provided at command-line; assuming sane default of ' + str(auto_trt)) + dwi_pe_scheme = dwi_manual_pe_scheme # May be needed later for triggering volume recombination + + # This may be required by -rpe_all for extracting b=0 volumes while retaining phase-encoding information + import_dwi_pe_table_option = '' + if dwi_manual_pe_scheme: + phaseencoding.save('dwi_manual_pe_scheme.txt', dwi_manual_pe_scheme) + import_dwi_pe_table_option = ' -import_pe_table dwi_manual_pe_scheme.txt' + + + # Find the index of the first DWI volume that is a b=0 volume + # This needs to occur at the outermost loop as it is pertinent information + # not only for the -align_seepi option, but also for when the -se_epi option + # is not provided at all, and the input to topup is extracted solely from the DWIs + bzero_threshold = float(CONFIG['BZeroThreshold']) if 'BZeroThreshold' in CONFIG else 10.0 + dwi_first_bzero_index = 0 + for line in grad: + if line[3] <= bzero_threshold: + break + dwi_first_bzero_index += 1 + app.debug('Index of first b=0 image in DWIs is ' + str(dwi_first_bzero_index)) + + + # Deal with the phase-encoding of the images to be fed to topup (if applicable) + do_topup = (not pe_design == 'None') + overwrite_se_epi_pe_scheme = False + se_epi_path = 'se_epi.mif' + dwi_permvols_preeddy_option = '' + dwi_permvols_posteddy_option = '' + dwi_bzero_added_to_se_epi = False + if app.ARGS.se_epi: + + # Newest version of eddy requires that topup field be on the same grid as the eddy input DWI + if not image.match(dwi_header, se_epi_header, up_to_dim=3): + app.console('DWIs and SE-EPI images used for inhomogeneity field estimation are defined on different image grids; ' + 'the latter will be automatically re-gridded to match the former') + new_se_epi_path = 'se_epi_regrid.mif' + run.command('mrtransform ' + se_epi_path + ' - -interp sinc -template dwi.mif | mrcalc - 0.0 -max ' + new_se_epi_path) + app.cleanup(se_epi_path) + se_epi_path = new_se_epi_path + se_epi_header = image.Header(se_epi_path) + + # 3 possible sources of PE information: DWI header, topup image header, command-line + # Any pair of these may conflict, and any one could be absent + + # Have to switch here based on phase-encoding acquisition design + if pe_design == 'Pair': + # Criteria: + # * If present in own header, ignore DWI header entirely - + # - If also provided at command-line, look for conflict & report + # - If not provided at command-line, nothing to do + # * If _not_ present in own header: + # - If provided at command-line, infer appropriately + # - If not provided at command-line, but the DWI header has that information, infer appropriately + if se_epi_pe_scheme: + if manual_pe_dir: + if not scheme_dirs_match(se_epi_pe_scheme, 
se_epi_manual_pe_scheme): + app.warn('User-defined phase-encoding direction design does not match what is stored in SE EPI image header; proceeding with user specification') + overwrite_se_epi_pe_scheme = True + if manual_trt: + if not scheme_times_match(se_epi_pe_scheme, se_epi_manual_pe_scheme): + app.warn('User-defined total readout time does not match what is stored in SE EPI image header; proceeding with user specification') + overwrite_se_epi_pe_scheme = True + if overwrite_se_epi_pe_scheme: + se_epi_pe_scheme = se_epi_manual_pe_scheme + else: + se_epi_manual_pe_scheme = None # To guarantee that these data are never used + else: + overwrite_se_epi_pe_scheme = True + se_epi_pe_scheme = se_epi_manual_pe_scheme + + elif pe_design == 'All': + # Criteria: + # * If present in own header: + # - Nothing to do + # * If _not_ present in own header: + # - Don't have enough information to proceed + # - Is this too harsh? (e.g. Have rules by which it may be inferred from the DWI header / command-line) + if not se_epi_pe_scheme: + raise MRtrixError('If explicitly including SE EPI images when using -rpe_all option, they must come with their own associated phase-encoding information in the image header') + + elif pe_design == 'Header': + # Criteria: + # * If present in own header: + # Nothing to do (-pe_dir option is mutually exclusive) + # * If _not_ present in own header: + # Cannot proceed + if not se_epi_pe_scheme: + raise MRtrixError('No phase-encoding information present in SE-EPI image header') + # If there is no phase encoding contrast within the SE-EPI series, + # try combining it with the DWI b=0 volumes, see if that produces some contrast + # However, this should probably only be permitted if the -align_seepi option is defined + se_epi_pe_scheme_has_contrast = 'pe_scheme' in se_epi_header.keyval() + if not se_epi_pe_scheme_has_contrast: + if app.ARGS.align_seepi: + app.console('No phase-encoding contrast present in SE-EPI images; will examine again after combining with DWI b=0 images') + new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_dwibzeros.mif' + # Don't worry about trying to produce a balanced scheme here + run.command('dwiextract dwi.mif - -bzero | mrcat - se_epi.mif ' + new_se_epi_path + ' -axis 3') + se_epi_header = image.Header(new_se_epi_path) + se_epi_pe_scheme_has_contrast = 'pe_scheme' in se_epi_header.keyval() + if se_epi_pe_scheme_has_contrast: + app.cleanup(se_epi_path) + se_epi_path = new_se_epi_path + se_epi_pe_scheme = phaseencoding.get_scheme(se_epi_header) + dwi_bzero_added_to_se_epi = True + # Delay testing appropriateness of the concatenation of these images + # (i.e. differences in contrast) to later + else: + raise MRtrixError('No phase-encoding contrast present in SE-EPI images, even after concatenating with b=0 images due to -align_seepi option; ' + 'cannot perform inhomogeneity field estimation') + else: + raise MRtrixError('No phase-encoding contrast present in SE-EPI images; cannot perform inhomogeneity field estimation') + + if app.ARGS.align_seepi: + + for field_name, description in { 'EchoTime': 'echo time', + 'RepetitionTime': 'repetition time', + 'FlipAngle': 'flip angle' }.items(): + dwi_value = dwi_header.keyval().get(field_name) + se_epi_value = se_epi_header.keyval().get(field_name) + if dwi_value and se_epi_value and dwi_value != se_epi_value: + app.warn('It appears that the spin-echo EPI images used for inhomogeneity field estimation have a different ' + description + ' to the DWIs being corrected. 
' + 'This may cause issues in estimation of the field, as the first DWI b=0 volume will be added to the input series to topup ' + 'due to use of the -align_seepi option.') + + # If we are using the -se_epi option, and hence the input images to topup have not come from the DWIs themselves, + # we need to insert the first b=0 DWI volume to the start of the topup input image. Otherwise, the field estimated + # by topup will not be correctly aligned with the volumes as they are processed by eddy. + # + # However, there's also a code path by which we may have already performed this addition. + # If we have already apliced the b=0 volumes from the DWI input with the SE-EPI image + # (due to the absence of phase-encoding contrast in the SE-EPI series), we don't want to + # re-attempt such a concatenation; the fact that the DWI b=0 images were inserted ahead of + # the SE-EPI images means the alignment issue should be dealt with. + + if dwi_first_bzero_index == len(grad) and not dwi_bzero_added_to_se_epi: + + app.warn('Unable to find b=0 volume in input DWIs to provide alignment between topup and eddy; script will proceed as though the -align_seepi option were not provided') + + # If b=0 volumes from the DWIs have already been added to the SE-EPI image due to an + # absence of phase-encoding contrast in the latter, we don't need to perform the following + elif not dwi_bzero_added_to_se_epi: + + run.command('mrconvert dwi.mif dwi_first_bzero.mif -coord 3 ' + str(dwi_first_bzero_index) + ' -axes 0,1,2') + dwi_first_bzero_pe = dwi_manual_pe_scheme[dwi_first_bzero_index] if overwrite_dwi_pe_scheme else dwi_pe_scheme[dwi_first_bzero_index] + + se_epi_pe_sum = [ 0, 0, 0 ] + se_epi_volume_to_remove = len(se_epi_pe_scheme) + for index, line in enumerate(se_epi_pe_scheme): + se_epi_pe_sum = [ i + j for i, j in zip(se_epi_pe_sum, line[0:3]) ] + if se_epi_volume_to_remove == len(se_epi_pe_scheme) and line[0:3] == dwi_first_bzero_pe[0:3]: + se_epi_volume_to_remove = index + new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_firstdwibzero.mif' + if (se_epi_pe_sum == [ 0, 0, 0 ]) and (se_epi_volume_to_remove < len(se_epi_pe_scheme)): + app.console('Balanced phase-encoding scheme detected in SE-EPI series; volume ' + str(se_epi_volume_to_remove) + ' will be removed and replaced with first b=0 from DWIs') + run.command('mrconvert ' + se_epi_path + ' - -coord 3 ' + ','.join([str(index) for index in range(len(se_epi_pe_scheme)) if not index == se_epi_volume_to_remove]) + ' | mrcat dwi_first_bzero.mif - ' + new_se_epi_path + ' -axis 3') + # Also need to update the phase-encoding scheme appropriately if it's being set manually + # (if embedded within the image headers, should be updated through the command calls) + if se_epi_manual_pe_scheme: + first_line = list(manual_pe_dir) + first_line.append(trt) + new_se_epi_manual_pe_scheme = [ ] + new_se_epi_manual_pe_scheme.append(first_line) + for index, entry in enumerate(se_epi_manual_pe_scheme): + if not index == se_epi_volume_to_remove: + new_se_epi_manual_pe_scheme.append(entry) + se_epi_manual_pe_scheme = new_se_epi_manual_pe_scheme + else: + if se_epi_pe_sum == [ 0, 0, 0 ] and se_epi_volume_to_remove == len(se_epi_pe_scheme): + app.console('Phase-encoding scheme of -se_epi image is balanced, but could not find appropriate volume with which to substitute first b=0 volume from DWIs; first b=0 DWI volume will be inserted to start of series, resulting in an unbalanced scheme') + else: + app.console('Unbalanced phase-encoding scheme detected in series provided via 
-se_epi option; first DWI b=0 volume will be inserted to start of series') + run.command('mrcat dwi_first_bzero.mif ' + se_epi_path + ' ' + new_se_epi_path + ' -axis 3') + # Also need to update the phase-encoding scheme appropriately + if se_epi_manual_pe_scheme: + first_line = list(manual_pe_dir) + first_line.append(trt) + se_epi_manual_pe_scheme = [ first_line, se_epi_manual_pe_scheme ] + + # Ended branching based on balanced-ness of PE acquisition scheme within SE-EPI volumes + app.cleanup(se_epi_path) + app.cleanup('dwi_first_bzero.mif') + se_epi_path = new_se_epi_path + + # Ended branching based on: + # - Detection of first b=0 volume in DWIs; or + # - Prior merge of SE-EPI and DWI b=0 volumes due to no phase-encoding contrast in SE-EPI + + # Completed checking for presence of -se_epi option + + elif not pe_design == 'None': # No SE EPI images explicitly provided: In some cases, can extract appropriate b=0 images from DWI + + # If using 'All' or 'Header', and haven't been given any topup images, need to extract the b=0 volumes from the series, + # preserving phase-encoding information while doing so + # Preferably also make sure that there's some phase-encoding contrast in there... + # With -rpe_all, need to write inferred phase-encoding to file and import before using dwiextract so that the phase-encoding + # of the extracted b=0's is propagated to the generated b=0 series + run.command('mrconvert dwi.mif' + import_dwi_pe_table_option + ' - | dwiextract - ' + se_epi_path + ' -bzero') + se_epi_header = image.Header(se_epi_path) + + # If there's no contrast remaining in the phase-encoding scheme, it'll be written to + # PhaseEncodingDirection and TotalReadoutTime rather than pe_scheme + # In this scenario, we will be unable to run topup, or volume recombination + if 'pe_scheme' not in se_epi_header.keyval(): + if pe_design == 'All': + raise MRtrixError('DWI header indicates no phase encoding contrast between b=0 images; cannot proceed with volume recombination-based pre-processing') + app.warn('DWI header indicates no phase encoding contrast between b=0 images; proceeding without inhomogeneity field estimation') + do_topup = False + run.function(os.remove, se_epi_path) + se_epi_path = None + se_epi_header = None + + + # If the first b=0 volume in the DWIs is in fact not the first volume (i.e. index zero), we're going to + # manually place it at the start of the DWI volumes when they are input to eddy, so that the + # first input volume to topup and the first input volume to eddy are one and the same. + # Note: If at a later date, the statistical outputs from eddy are considered (e.g. 
motion, outliers), + # then this volume permutation will need to be taken into account + if dwi_first_bzero_index == len(grad): + app.warn("No image volumes were classified as b=0 by MRtrix3; no permutation of order of DWI volumes can occur " + \ + "(do you need to adjust config file entry BZeroThreshold?)") + elif dwi_first_bzero_index: + app.console('First b=0 volume in input DWIs is volume index ' + str(dwi_first_bzero_index) + '; ' + 'this will be permuted to be the first volume (index 0) when eddy is run') + dwi_permvols_preeddy_option = ' -coord 3 ' + \ + str(dwi_first_bzero_index) + \ + ',0' + \ + (':' + str(dwi_first_bzero_index-1) if dwi_first_bzero_index > 1 else '') + \ + (',' + str(dwi_first_bzero_index+1) if dwi_first_bzero_index < dwi_num_volumes-1 else '') + \ + (':' + str(dwi_num_volumes-1) if dwi_first_bzero_index < dwi_num_volumes-2 else '') + dwi_permvols_posteddy_option = ' -coord 3 1' + \ + (':' + str(dwi_first_bzero_index) if dwi_first_bzero_index > 1 else '') + \ + ',0' + \ + (',' + str(dwi_first_bzero_index+1) if dwi_first_bzero_index < dwi_num_volumes-1 else '') + \ + (':' + str(dwi_num_volumes-1) if dwi_first_bzero_index < dwi_num_volumes-2 else '') + app.debug('mrconvert options for axis permutation:') + app.debug('Pre: ' + str(dwi_permvols_preeddy_option)) + app.debug('Post: ' + str(dwi_permvols_posteddy_option)) + + + + # This may be required when setting up the topup call + se_epi_manual_pe_table_option = '' + if se_epi_manual_pe_scheme: + phaseencoding.save('se_epi_manual_pe_scheme.txt', se_epi_manual_pe_scheme) + se_epi_manual_pe_table_option = ' -import_pe_table se_epi_manual_pe_scheme.txt' + + + # Need gradient table if running dwi2mask after applytopup to derive a brain mask for eddy + run.command('mrinfo dwi.mif -export_grad_mrtrix grad.b') + + + eddy_in_topup_option = '' + dwi_post_eddy_crop_option = '' + dwi_path = 'dwi.mif' + if do_topup: + + # topup will crash if its input image has a spatial dimension with a non-even size; + # presumably due to a downsampling by a factor of 2 in a multi-resolution scheme + # The newest eddy also requires the output from topup and the input DWIs to have the same size; + # therefore this restriction applies to the DWIs as well + # Rather than crop in this case (which would result in a cropped output image), + # duplicate the last slice on any problematic axis, and then crop that extra + # slice at the output step + # By this point, if the input SE-EPI images and DWIs are not on the same image grid, the + # SE-EPI images have already been re-gridded to DWI image space; + odd_axis_count = 0 + for axis_size in dwi_header.size()[:3]: + if int(axis_size%2): + odd_axis_count += 1 + if odd_axis_count: + app.console(str(odd_axis_count) + ' spatial ' + ('axes of DWIs have' if odd_axis_count > 1 else 'axis of DWIs has') + ' non-even size; ' + 'this will be automatically padded for compatibility with topup, and the extra slice' + ('s' if odd_axis_count > 1 else '') + ' erased afterwards') + for axis, axis_size in enumerate(dwi_header.size()[:3]): + if int(axis_size%2): + new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_pad' + str(axis) + '.mif' + run.command('mrconvert ' + se_epi_path + ' -coord ' + str(axis) + ' ' + str(axis_size-1) + ' - | mrcat ' + se_epi_path + ' - ' + new_se_epi_path + ' -axis ' + str(axis)) + app.cleanup(se_epi_path) + se_epi_path = new_se_epi_path + new_dwi_path = os.path.splitext(dwi_path)[0] + '_pad' + str(axis) + '.mif' + run.command('mrconvert ' + dwi_path + ' -coord ' + str(axis) + ' ' + 
str(axis_size-1) + ' - | mrcat ' + dwi_path + ' - ' + new_dwi_path + ' -axis ' + str(axis)) + app.cleanup(dwi_path) + dwi_path = new_dwi_path + dwi_post_eddy_crop_option += ' -coord ' + str(axis) + ' 0:' + str(axis_size-1) + # If we are padding the slice axis, and performing slice-to-volume correction, + # then we need to perform the corresponding padding to the slice timing + if eddy_mporder and slice_encoding_direction[axis]: + dwi_num_slices += 1 + # At this point in the script, this information may be encoded either within + # the slice timing vector (as imported from the image header), or as + # slice groups (i.e. in the format expected by eddy). How these data are + # stored affects how the padding is performed. + if slice_timing: + slice_timing.append(slice_timing[-1]) + elif slice_groups: + # Can't edit in place when looping through the list + new_slice_groups = [ ] + for group in slice_groups: + if axis_size-1 in group: + group.append(axis_size) + new_slice_groups.append(group) + slice_groups = new_slice_groups + + + # Do the conversion in preparation for topup + run.command('mrconvert ' + se_epi_path + ' topup_in.nii' + se_epi_manual_pe_table_option + ' -strides -1,+2,+3,+4 -export_pe_table topup_datain.txt') + app.cleanup(se_epi_path) + + # Run topup + topup_manual_options = '' + if app.ARGS.topup_options: + topup_manual_options = ' ' + app.ARGS.topup_options.strip() + topup_output = run.command(topup_cmd + ' --imain=topup_in.nii --datain=topup_datain.txt --out=field --fout=field_map' + fsl_suffix + ' --config=' + topup_config_path + ' --verbose' + topup_manual_options) + with open('topup_output.txt', 'w') as topup_output_file: + topup_output_file.write(topup_output.stdout + '\n' + topup_output.stderr) + if app.VERBOSITY > 1: + app.console('Output of topup command:') + sys.stderr.write(topup_output.stdout + '\n' + topup_output.stderr + '\n') + + # Apply the warp field to the input image series to get an initial corrected volume estimate + # applytopup can't receive the complete DWI input and correct it as a whole, because the phase-encoding + # details may vary between volumes + if dwi_manual_pe_scheme: + run.command('mrconvert ' + dwi_path + import_dwi_pe_table_option + ' - | mrinfo - -export_pe_eddy applytopup_config.txt applytopup_indices.txt') + else: + run.command('mrinfo ' + dwi_path + ' -export_pe_eddy applytopup_config.txt applytopup_indices.txt') + + + # Update: Call applytopup separately for each unique phase-encoding + # This should be the most compatible option with more complex phase-encoding acquisition designs, + # since we don't need to worry about applytopup performing volume recombination + # Plus, recombination doesn't need to be optimal; we're only using this to derive a brain mask + applytopup_image_list = [ ] + index = 1 + applytopup_config = matrix.load_matrix('applytopup_config.txt') + for line in applytopup_config: + prefix = os.path.splitext(dwi_path)[0] + '_pe_' + str(index) + input_path = prefix + '.nii' + json_path = prefix + '.json' + temp_path = prefix + '_applytopup.nii' + output_path = prefix + '_applytopup.mif' + run.command('dwiextract ' + dwi_path + import_dwi_pe_table_option + ' -pe ' + ','.join(str(value) for value in line) + ' - | mrconvert - ' + input_path + ' -strides -1,+2,+3,+4 -json_export ' + json_path) + run.command(applytopup_cmd + ' --imain=' + input_path + ' --datain=applytopup_config.txt --inindex=' + str(index) + ' --topup=field --out=' + temp_path + ' --method=jac') + app.cleanup(input_path) + temp_path = 
fsl.find_image(temp_path) + run.command('mrconvert ' + temp_path + ' ' + output_path + ' -json_import ' + json_path) + app.cleanup(json_path) + app.cleanup(temp_path) + applytopup_image_list.append(output_path) + index += 1 + + # Use the initial corrected volumes to derive a brain mask for eddy + if not app.ARGS.eddy_mask: + if len(applytopup_image_list) == 1: + run.command('dwi2mask ' + applytopup_image_list[0] + ' - | maskfilter - dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') + else: + run.command('mrcat ' + ' '.join(applytopup_image_list) + ' - -axis 3 | dwi2mask - - | maskfilter - dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') + + app.cleanup(applytopup_image_list) + + eddy_in_topup_option = ' --topup=field' + + else: + + # Generate a processing mask for eddy based on the uncorrected input DWIs + if not app.ARGS.eddy_mask: + run.command('dwi2mask ' + dwi_path + ' - | maskfilter - dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') + + + # Use user supplied mask for eddy instead of one derived from the images using dwi2mask + if app.ARGS.eddy_mask: + if image.match('eddy_mask.mif', dwi_path, up_to_dim=3): + run.command('mrconvert eddy_mask.mif eddy_mask.nii -datatype float32 -stride -1,+2,+3') + else: + app.warn('User-provided processing mask for eddy does not match DWI voxel grid; resampling') + run.command('mrtransform eddy_mask.mif - -template ' + dwi_path + ' -interp linear | ' + + 'mrthreshold - -abs 0.5 - | ' + + 'mrconvert - eddy_mask.nii -datatype float32 -stride -1,+2,+3') + app.cleanup('eddy_mask.mif') + + # Generate the text file containing slice timing / grouping information if necessary + if eddy_mporder: + if slice_timing: + # This list contains, for each slice, the timing offset between acquisition of the + # first slice in the volume, and acquisition of that slice + # Eddy however requires a text file where each row contains those slices that were + # acquired with a single readout, in ordered rows from first slice (group) + # acquired to last slice (group) acquired + if sum(slice_encoding_direction) < 0: + slice_timing = reversed(slice_timing) + slice_groups = [ [ x[0] for x in g ] for _, g in itertools.groupby(sorted(enumerate(slice_timing), key=lambda x:x[1]), key=lambda x:x[1]) ] #pylint: disable=unused-variable + app.debug('Slice timing: ' + str(slice_timing)) + app.debug('Resulting slice groups: ' + str(slice_groups)) + # Variable slice_groups may have already been defined in the correct format. + # In that instance, there's nothing to do other than write it to file; + # UNLESS the slice encoding direction is known to be reversed, in which case + # we need to reverse the timings. Would think that this would however be + # rare, given it requires that the slspec text file be provided manually but + # SliceEncodingDirection to be present. 
+ elif slice_groups and sum(slice_encoding_direction) < 0: + new_slice_groups = [ ] + for group in slice_groups: + new_slice_groups.append([ dwi_num_slices-1-index for index in group ]) + app.debug('Slice groups reversed due to negative slice encoding direction') + app.debug('Original: ' + str(slice_groups)) + app.debug('New: ' + str(new_slice_groups)) + slice_groups = new_slice_groups + + matrix.save_numeric('slspec.txt', slice_groups, add_to_command_history=False, fmt='%d') + eddy_manual_options.append('--slspec=slspec.txt') + + + # Revert eddy_manual_options from a list back to a single string + eddy_manual_options = (' ' + ' '.join(eddy_manual_options)) if eddy_manual_options else '' + + + # Prepare input data for eddy + run.command('mrconvert ' + dwi_path + import_dwi_pe_table_option + dwi_permvols_preeddy_option + ' eddy_in.nii -strides -1,+2,+3,+4 -export_grad_fsl bvecs bvals -export_pe_eddy eddy_config.txt eddy_indices.txt') + app.cleanup(dwi_path) + + # Run eddy + # If a CUDA version is in PATH, run that first; if it fails, re-try using the non-CUDA version + eddy_all_options = '--imain=eddy_in.nii --mask=eddy_mask.nii --acqp=eddy_config.txt --index=eddy_indices.txt --bvecs=bvecs --bvals=bvals' + eddy_in_topup_option + eddy_manual_options + ' --out=dwi_post_eddy --verbose' + eddy_cuda_cmd = fsl.eddy_binary(True) + eddy_openmp_cmd = fsl.eddy_binary(False) + if eddy_cuda_cmd: + # If running CUDA version, but OpenMP version is also available, don't stop the script if the CUDA version fails + try: + eddy_output = run.command(eddy_cuda_cmd + ' ' + eddy_all_options) + except run.MRtrixCmdError as exception: + if not eddy_openmp_cmd: + raise + with open('eddy_cuda_failure_output.txt', 'w') as eddy_output_file: + eddy_output_file.write(exception.stdout + '\n' + exception.stderr + '\n') + app.console('CUDA version of \'eddy\' was not successful; attempting OpenMP version') + eddy_output = run.command(eddy_openmp_cmd + ' ' + eddy_all_options) + else: + eddy_output = run.command(eddy_openmp_cmd + ' ' + eddy_all_options) + with open('eddy_output.txt', 'w') as eddy_output_file: + eddy_output_file.write(eddy_output.stdout + '\n' + eddy_output.stderr + '\n') + if app.VERBOSITY > 1: + app.console('Output of eddy command:') + sys.stderr.write(eddy_output.stdout + '\n' + eddy_output.stderr + '\n') + app.cleanup('eddy_in.nii') + + eddy_output_image_path = fsl.find_image('dwi_post_eddy') + + + # Check to see whether or not eddy has provided a rotated bvecs file; + # if it has, import this into the output image + bvecs_path = 'dwi_post_eddy.eddy_rotated_bvecs' + if not os.path.isfile(bvecs_path): + app.warn('eddy has not provided rotated bvecs file; using original gradient table. 
Recommend updating FSL eddy to version 5.0.9 or later.') + bvecs_path = 'bvecs' + + + # Run eddy qc tool QUAD if installed and one of -eddyqc_text or -eddyqc_all is specified + if eddyqc_path: + eddyqc_cmd = find_executable('eddy_quad') + if eddyqc_cmd: + # The eddy_quad script is not compatible with Python 2; try to explicitly invoke Python 3 + if sys.version_info[0] == 2: + if find_executable('python3'): + eddyqc_cmd = 'python3 ' + eddyqc_cmd + else: + eddyqc_cmd = 'eddy_quad' + eddyqc_options = '-idx eddy_indices.txt -par eddy_config.txt -m eddy_mask.nii -b bvals' + if os.path.isfile('dwi_post_eddy.eddy_residuals'): + eddyqc_options += ' -g ' + bvecs_path + if do_topup: + eddyqc_options += ' -f ' + fsl.find_image('field_fieldcoef') + if eddy_mporder: + eddyqc_options += ' -s slspec.txt' + if app.VERBOSITY > 2: + eddyqc_options += ' -v' + try: + run.command(eddyqc_cmd + ' dwi_post_eddy ' + eddyqc_options) + except run.MRtrixCmdError as exception: + app.warn('Error running automated eddy qc tool QUAD; data will not be provided') + app.debug(str(exception)) + # Delete the directory if the script only made it partway through + try: + shutil.rmtree('dwi_post_eddy.qc') + except OSError: + pass + else: + app.console('Command \'eddy_quad\' not found in PATH; not running automated eddy qc tool QUAD') + + + # Have to retain these images until after eddyQC is run + # If using -eddyqc_all, also write the mask provided to eddy to the output directory; + # therefore don't delete it yet here + if not app.ARGS.eddyqc_all: + app.cleanup('eddy_mask.nii') + if do_topup: + app.cleanup(fsl.find_image('field_fieldcoef')) + + + # Get the axis strides from the input series, so the output image can be modified to match + stride_option = ' -strides ' + ','.join([str(i) for i in dwi_header.strides()]) + + + # Determine whether or not volume recombination should be performed + # This could be either due to use of -rpe_all option, or just due to the data provided with -rpe_header + # Rather than trying to re-use the code that was used in the case of -rpe_all, run fresh code + # The phase-encoding scheme needs to be checked also + volume_matchings = [ dwi_num_volumes ] * dwi_num_volumes + volume_pairs = [ ] + app.debug('Commencing gradient direction matching; ' + str(dwi_num_volumes) + ' volumes') + for index1 in range(dwi_num_volumes): + if volume_matchings[index1] == dwi_num_volumes: # As yet unpaired + for index2 in range(index1+1, dwi_num_volumes): + if volume_matchings[index2] == dwi_num_volumes: # Also as yet unpaired + # Here, need to check both gradient matching and reversed phase-encode direction + if not any(dwi_pe_scheme[index1][i] + dwi_pe_scheme[index2][i] for i in range(0,3)) and grads_match(index1, index2): + volume_matchings[index1] = index2 + volume_matchings[index2] = index1 + volume_pairs.append([index1, index2]) + app.debug('Matched volume ' + str(index1) + ' with ' + str(index2) + '\n' + + 'Phase encoding: ' + str(dwi_pe_scheme[index1]) + ' ' + str(dwi_pe_scheme[index2]) + '\n' + + 'Gradients: ' + str(grad[index1]) + ' ' + str(grad[index2])) + break + + + if len(volume_pairs) != int(dwi_num_volumes/2): + + if do_topup: + app.cleanup('topup_in.nii') + app.cleanup(fsl.find_image('field_map')) + + # Convert the resulting volume to the output image, and re-insert the diffusion encoding + run.command('mrconvert ' + eddy_output_image_path + ' result.mif' + dwi_permvols_posteddy_option + dwi_post_eddy_crop_option + stride_option + ' -fslgrad ' + bvecs_path + ' bvals') + 
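# A minimal standalone sketch of the volume-pairing loop above, using toy inputs;
# toy_same_direction() is an assumed stand-in for the script's grads_match() test.
def pair_opposed_volumes(pe_scheme, grad, same_direction):
  num = len(pe_scheme)
  matched = [ num ] * num            # 'num' marks a volume as not yet paired
  pairs = [ ]
  for i in range(num):
    if matched[i] != num:
      continue
    for j in range(i+1, num):
      if matched[j] != num:
        continue
      # Opposing phase encoding: the first three elements cancel component-wise
      opposed = not any(pe_scheme[i][k] + pe_scheme[j][k] for k in range(3))
      if opposed and same_direction(grad[i], grad[j]):
        matched[i], matched[j] = j, i
        pairs.append((i, j))
        break
  return pairs

def toy_same_direction(a, b):        # equal b-value and (anti)parallel direction, or both b=0
  if a[3] == 0 and b[3] == 0:
    return True
  dot = sum(x*y for x, y in zip(a[:3], b[:3]))
  return a[3] == b[3] and abs(dot) > 0.999

pe = [ [0,1,0,0.05], [0,1,0,0.05], [0,-1,0,0.05], [0,-1,0,0.05] ]   # AP, AP, PA, PA
g  = [ [0,0,0,0], [1,0,0,1000], [0,0,0,0], [1,0,0,1000] ]
print(pair_opposed_volumes(pe, g, toy_same_direction))              # [(0, 2), (1, 3)]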
app.cleanup(eddy_output_image_path) + + else: + app.console('Detected matching DWI volumes with opposing phase encoding; performing explicit volume recombination') + + # Perform a manual combination of the volumes output by eddy, since LSR is disabled + + # Generate appropriate bvecs / bvals files + # Particularly if eddy has provided rotated bvecs, since we're combining two volumes into one that + # potentially have subject rotation between them (and therefore the sensitisation direction is + # not precisely equivalent), the best we can do is take the mean of the two vectors. + # Manual recombination of volumes needs to take into account the explicit volume matching + + bvecs = matrix.load_matrix(bvecs_path) + bvecs_combined_transpose = [ ] + bvals_combined = [ ] + + for pair in volume_pairs: + bvec_mean = [ 0.5*(bvecs[0][pair[0]] + bvecs[0][pair[1]]), + 0.5*(bvecs[1][pair[0]] + bvecs[1][pair[1]]), + 0.5*(bvecs[2][pair[0]] + bvecs[2][pair[1]]) ] + norm2 = matrix.dot(bvec_mean, bvec_mean) + + # If one diffusion sensitisation gradient direction is reversed with respect to + # the other, still want to enable their recombination; but need to explicitly + # account for this when averaging the two directions + if norm2 < 0.5: + bvec_mean = [ 0.5*(bvecs[0][pair[0]] - bvecs[0][pair[1]]), + 0.5*(bvecs[1][pair[0]] - bvecs[1][pair[1]]), + 0.5*(bvecs[2][pair[0]] - bvecs[2][pair[1]]) ] + norm2 = matrix.dot(bvec_mean, bvec_mean) + + # Occasionally a b=0 volume can have a zero vector + if norm2: + factor = 1.0 / math.sqrt(norm2) + new_vec = [ bvec_mean[0]*factor, bvec_mean[1]*factor, bvec_mean[2]*factor ] + else: + new_vec = [ 0.0, 0.0, 0.0 ] + bvecs_combined_transpose.append(new_vec) + bvals_combined.append(0.5 * (grad[pair[0]][3] + grad[pair[1]][3])) + + bvecs_combined = matrix.transpose(bvecs_combined_transpose) + matrix.save_matrix('bvecs_combined', bvecs_combined, add_to_command_history=False) + matrix.save_vector('bvals_combined', bvals_combined, add_to_command_history=False) + + # Prior to 5.0.8, a bug resulted in the output field map image from topup having an identity transform, + # regardless of the transform of the input image + # Detect this, and manually replace the transform if necessary + # (even if this doesn't cause an issue with the subsequent mrcalc command, it may in the future, it's better for + # visualising the script intermediate files, and it gives the user a warning about an out-of-date FSL) + field_map_image = fsl.find_image('field_map') + field_map_header = image.Header(field_map_image) + if not image.match('topup_in.nii', field_map_header, up_to_dim=3): + app.warn('topup output field image has erroneous header; recommend updating FSL to version 5.0.8 or later') + new_field_map_image = 'field_map_fix.mif' + run.command('mrtransform ' + field_map_image + ' -replace topup_in.nii ' + new_field_map_image) + app.cleanup(field_map_image) + field_map_image = new_field_map_image + # In FSL 6.0.0, field map image is erroneously constructed with the same number of volumes as the input image, + # with all but the first volume containing intensity-scaled duplicates of the uncorrected input images + # The first volume is however the expected field offset image + elif len(field_map_header.size()) == 4: + app.console('Correcting erroneous FSL 6.0.0 field map image output') + new_field_map_image = 'field_map_fix.mif' + run.command('mrconvert ' + field_map_image + ' -coord 3 0 -axes 0,1,2 ' + new_field_map_image) + app.cleanup(field_map_image) + field_map_image = new_field_map_image + 
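# A small sketch of the per-pair gradient averaging above: eddy's rotated bvecs can differ
# slightly between the two members of a pair (subject rotation), and one may be stored
# antiparallel to the other, so the sign is checked before the mean is taken and renormalised.
# Vectors below are toy values only.
import math

def average_pair_bvec(v1, v2):
  mean = [ 0.5*(a+b) for a, b in zip(v1, v2) ]
  norm2 = sum(c*c for c in mean)
  if norm2 < 0.5:                    # roughly antiparallel: flip the second vector before averaging
    mean = [ 0.5*(a-b) for a, b in zip(v1, v2) ]
    norm2 = sum(c*c for c in mean)
  if not norm2:                      # a b=0 volume may legitimately carry a zero vector
    return [ 0.0, 0.0, 0.0 ]
  factor = 1.0 / math.sqrt(norm2)
  return [ c*factor for c in mean ]

print(average_pair_bvec([1.0, 0.0, 0.0], [-0.999, -0.02, 0.0]))     # ~[1.0, 0.01, 0.0] after renormalisation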
app.cleanup('topup_in.nii') + + + # Derive the weight images + # Scaling term for field map is identical to the bandwidth provided in the topup config file + # (converts Hz to pixel count; that way a simple image gradient can be used to get the Jacobians) + # Let mrfilter apply the default 1 voxel size gaussian smoothing filter before calculating the field gradient + # + # The jacobian image may be different for any particular volume pair + # The appropriate PE directions and total readout times can be acquired from the eddy-style config/index files + # eddy_config.txt and eddy_indices.txt + eddy_config = matrix.load_matrix('eddy_config.txt') + eddy_indices = matrix.load_vector('eddy_indices.txt', dtype=int) + app.debug('EDDY config: ' + str(eddy_config)) + app.debug('EDDY indices: ' + str(eddy_indices)) + + # This section derives, for each phase encoding configuration present, the 'weight' to be applied + # to the image during volume recombination, which is based on the Jacobian of the field in the + # phase encoding direction + for index, config in enumerate(eddy_config): + pe_axis = [ i for i, e in enumerate(config[0:3]) if e != 0][0] + sign_multiplier = ' -1.0 -mult' if config[pe_axis] < 0 else '' + field_derivative_path = 'field_deriv_pe_' + str(index+1) + '.mif' + run.command('mrcalc ' + field_map_image + ' ' + str(config[3]) + ' -mult' + sign_multiplier + ' - | mrfilter - gradient - | mrconvert - ' + field_derivative_path + ' -coord 3 ' + str(pe_axis) + ' -axes 0,1,2') + jacobian_path = 'jacobian_' + str(index+1) + '.mif' + run.command('mrcalc 1.0 ' + field_derivative_path + ' -add 0.0 -max ' + jacobian_path) + app.cleanup(field_derivative_path) + run.command('mrcalc ' + jacobian_path + ' ' + jacobian_path + ' -mult weight' + str(index+1) + '.mif') + app.cleanup(jacobian_path) + app.cleanup(field_map_image) + + # If eddy provides its main image output in a compressed format, the code block below will need to + # uncompress that image independently for every volume pair. Instead, if this is the case, let's + # convert it to an uncompressed format before we do anything with it. 
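# A per-voxel sketch of the Jacobian-weighted recombination applied in the loop further below
# (the Skare & Bammer 2010 equation referenced there): each member of a volume pair is weighted
# by the square of the field Jacobian along its own phase-encode direction, so the locally
# stretched volume dominates where its counterpart has been compressed. Values are illustrative
# only, and at least one weight is assumed to be non-zero.
def recombine_voxel(s1, s2, jac1, jac2):
  w1, w2 = jac1*jac1, jac2*jac2
  return max((s1*w1 + s2*w2) / (w1 + w2), 0.0)

# A voxel expanded along one phase-encode direction (J=1.4) and compressed along the other (J=0.6):
print(recombine_voxel(900.0, 1100.0, 1.4, 0.6))                     # ~931: weighted towards the J=1.4 volume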
+ if eddy_output_image_path.endswith('.gz'): + new_eddy_output_image_path = 'dwi_post_eddy_uncompressed.mif' + run.command('mrconvert ' + eddy_output_image_path + ' ' + new_eddy_output_image_path) + app.cleanup(eddy_output_image_path) + eddy_output_image_path = new_eddy_output_image_path + + # If the DWI volumes were permuted prior to running eddy, then the simplest approach is to permute them + # back to their original positions; otherwise, the stored gradient vector directions / phase encode + # directions / matched volume pairs are no longer appropriate + if dwi_permvols_posteddy_option: + new_eddy_output_image_path = os.path.splitext(eddy_output_image_path)[0] + '_volpermuteundo.mif' + run.command('mrconvert ' + eddy_output_image_path + dwi_permvols_posteddy_option + ' ' + new_eddy_output_image_path) + app.cleanup(eddy_output_image_path) + eddy_output_image_path = new_eddy_output_image_path + + # This section extracts the two volumes corresponding to each reversed phase-encoded volume pair, and + # derives a single image volume based on the recombination equation + combined_image_list = [ ] + progress = app.ProgressBar('Performing explicit volume recombination', len(volume_pairs)) + for index, volumes in enumerate(volume_pairs): + pe_indices = [ eddy_indices[i] for i in volumes ] + run.command('mrconvert ' + eddy_output_image_path + ' volume0.mif -coord 3 ' + str(volumes[0])) + run.command('mrconvert ' + eddy_output_image_path + ' volume1.mif -coord 3 ' + str(volumes[1])) + # Volume recombination equation described in Skare and Bammer 2010 + combined_image_path = 'combined' + str(index) + '.mif' + run.command('mrcalc volume0.mif weight' + str(pe_indices[0]) + '.mif -mult volume1.mif weight' + str(pe_indices[1]) + '.mif -mult -add weight' + str(pe_indices[0]) + '.mif weight' + str(pe_indices[1]) + '.mif -add -divide 0.0 -max ' + combined_image_path) + combined_image_list.append(combined_image_path) + run.function(os.remove, 'volume0.mif') + run.function(os.remove, 'volume1.mif') + progress.increment() + progress.done() + + app.cleanup(eddy_output_image_path) + for index in range(0, len(eddy_config)): + app.cleanup('weight' + str(index+1) + '.mif') + + # Finally the recombined volumes must be concatenated to produce the resulting image series + combine_command = ['mrcat', combined_image_list, '-', '-axis', '3', '|', \ + 'mrconvert', '-', 'result.mif', '-fslgrad', 'bvecs_combined', 'bvals_combined'] + if dwi_post_eddy_crop_option: + combine_command.extend(dwi_post_eddy_crop_option.strip().split(' ')) + combine_command.extend(stride_option.strip().split(' ')) + run.command(combine_command) + app.cleanup(combined_image_list) + + + # Grab any relevant files that eddy has created, and copy them to the requested directory + if eddyqc_path: + if app.FORCE_OVERWRITE and os.path.exists(eddyqc_path) and not os.path.isdir(eddyqc_path): + run.function(os.remove, eddyqc_path) + if not os.path.exists(eddyqc_path): + run.function(os.makedirs, eddyqc_path) + for filename in eddyqc_files: + if os.path.exists('dwi_post_eddy.' + filename): + run.function(shutil.copy, 'dwi_post_eddy.' 
+ filename, os.path.join(eddyqc_path, filename)) + # Also grab any files generated by the eddy qc tool QUAD + if os.path.isdir('dwi_post_eddy.qc'): + if app.FORCE_OVERWRITE and os.path.exists(os.path.join(eddyqc_path, 'quad')): + run.function(shutil.rmtree, os.path.join(eddyqc_path, 'quad')) + run.function(shutil.copytree, 'dwi_post_eddy.qc', os.path.join(eddyqc_path, 'quad')) + # Also grab the brain mask that was provided to eddy if -eddyqc_all was specified + if app.ARGS.eddyqc_all: + run.function(shutil.copy, 'eddy_mask.nii', os.path.join(eddyqc_path, 'eddy_mask.nii')) + app.cleanup('eddy_mask.nii') + + + + + keys_to_remove = [ 'MultibandAccelerationFactor', 'SliceEncodingDirection', 'SliceTiming' ] + # These keys are still relevant for the output data if no EPI distortion correction was performed + if do_topup: + keys_to_remove.extend([ 'PhaseEncodingDirection', 'TotalReadoutTime', 'pe_scheme' ]) + # Get the header key-value entries from the input DWI, remove those we don't wish to keep, and + # export the result to a new JSON file so that they can be inserted into the output header + with open('dwi.json', 'r') as input_json_file: + keyval = json.load(input_json_file) + for key in keys_to_remove: + keyval.pop(key, None) + # 'Stash' the phase encoding scheme of the original uncorrected DWIs, since it still + # may be useful information at some point in the future but is no longer relevant + # for e.g. tracking for different volumes, or performing any geometric corrections + if do_topup: + keyval['prior_pe_scheme'] = dwi_manual_pe_scheme if dwi_manual_pe_scheme else dwi_pe_scheme + with open('output.json', 'w') as output_json_file: + json.dump(keyval, output_json_file) + + + # Finish! + run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output) + grad_export_option, mrconvert_keyval='output.json', force=app.FORCE_OVERWRITE) + + + + + + +# Execute the script +import mrtrix3 +mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/dwigradcheck b/bin/dwigradcheck index 3b608ded6b..0ba5a33ea1 100755 --- a/bin/dwigradcheck +++ b/bin/dwigradcheck @@ -1,215 +1,206 @@ #!/usr/bin/env python -# Script for checking the orientation of the diffusion gradient table - -# Make the corresponding MRtrix3 Python libraries available -import inspect, os, sys -lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib')) -if not os.path.isdir(lib_folder): - sys.stderr.write('Unable to locate MRtrix3 Python libraries') - sys.exit(1) -sys.path.insert(0, lib_folder) - -import copy, numbers, shutil -from mrtrix3 import app, image, run, path - -app.init('Robert E. Smith (robert.smith@florey.edu.au)', 'Check the orientation of the diffusion gradient table') -app.cmdline.addCitation('','Jeurissen, B.; Leemans, A.; Sijbers, J. Automated correction of improperly rotated diffusion gradient orientations in diffusion weighted MRI. 
Medical Image Analysis, 2014, 18(7), 953-962', False) -app.cmdline.add_argument('input', help='The input DWI series to be checked') - -grad_export = app.cmdline.add_argument_group('Options for exporting the estimated best gradient table') -grad_export.add_argument('-export_grad_mrtrix', metavar='grad', help='Export the final gradient table in MRtrix format') -grad_export.add_argument('-export_grad_fsl', nargs=2, metavar=('bvecs', 'bvals'), help='Export the final gradient table in FSL bvecs/bvals format') - -grad_import = app.cmdline.add_argument_group('Options for importing the gradient table') -grad_import.add_argument('-grad', help='Provide a gradient table in MRtrix format') -grad_import.add_argument('-fslgrad', nargs=2, metavar=('bvecs', 'bvals'), help='Provide a gradient table in FSL bvecs/bvals format') - -options = app.cmdline.add_argument_group('Options for the dwigradcheck script') -options.add_argument('-mask', help='Provide a brain mask image') -options.add_argument('-number', type=int, default=10000, help='Set the number of tracks to generate for each test') - -app.cmdline.flagMutuallyExclusiveOptions( ['grad', 'fslgrad' ]) -app.cmdline.flagMutuallyExclusiveOptions( ['export_grad_mrtrix', 'export_grad_fsl' ]) -app.parse() +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. -image_dimensions = image.Header(path.fromUser(app.args.input, False)).size() -if len(image_dimensions) != 4: - app.error('Input image must be a 4D image') -if min(image_dimensions) == 1: - app.error('Cannot perform tractography on an image with a unity dimension') -num_volumes = image_dimensions[3] +import copy, numbers, os, shutil, sys -app.makeTempDir() -# Make sure the image data can be memory-mapped -run.command('mrconvert ' + app.args.input + ' ' + path.toTemp('data.mif', True) + ' -strides 0,0,0,1 -datatype float32') -if app.args.grad: - shutil.copy(path.fromUser(app.args.grad, False), path.toTemp('grad.b', False)) -elif app.args.fslgrad: - shutil.copy(path.fromUser(app.args.fslgrad[0], False), path.toTemp('bvecs', False)) - shutil.copy(path.fromUser(app.args.fslgrad[1], False), path.toTemp('bvals', False)) -if app.args.mask: - run.command('mrconvert ' + path.fromUser(app.args.mask, True) + ' ' + path.toTemp('mask.mif', True) + ' -datatype bit') +def usage(cmdline): #pylint: disable=unused-variable + from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Check the orientation of the diffusion gradient table') + cmdline.add_citation('Jeurissen, B.; Leemans, A.; Sijbers, J. Automated correction of improperly rotated diffusion gradient orientations in diffusion weighted MRI. 
Medical Image Analysis, 2014, 18(7), 953-962') + cmdline.add_argument('input', help='The input DWI series to be checked') + cmdline.add_argument('-mask', metavar='image', help='Provide a brain mask image') + cmdline.add_argument('-number', type=int, default=10000, help='Set the number of tracks to generate for each test') -app.gotoTempDir() + app.add_dwgrad_export_options(cmdline) + app.add_dwgrad_import_options(cmdline) -# Make sure we have gradient table stored externally to header in both MRtrix and FSL formats -if not os.path.isfile('grad.b'): - if os.path.isfile('bvecs'): - run.command('mrinfo data.mif -fslgrad bvecs bvals -export_grad_mrtrix grad.b') - else: - run.command('mrinfo data.mif -export_grad_mrtrix grad.b') -if not os.path.isfile('bvecs'): - if os.path.isfile('grad.b'): - run.command('mrinfo data.mif -grad grad.b -export_grad_fsl bvecs bvals') - else: - run.command('mrinfo data.mif -export_grad_fsl bvecs bvals') +def execute(): #pylint: disable=unused-variable + from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel + from mrtrix3 import app, image, matrix, path, run #pylint: disable=no-name-in-module, import-outside-toplevel -# Import both of these into local memory -with open('grad.b', 'r') as f: - grad_mrtrix = f.read().split('\n') -with open('bvecs', 'r') as f: - grad_fsl = f.read().split('\n') -# Erase the empty last line if necessary -if len(grad_mrtrix[-1]) <= 1: - grad_mrtrix.pop() -if len(grad_fsl[-1]) <= 1: - grad_fsl.pop() -# Turn into float matrices -grad_mrtrix = [ [ float(f) for f in line.split(' ') ] for line in grad_mrtrix ] -grad_fsl = [ [ float(f) for f in line.split() ] for line in grad_fsl ] -# Is our gradient table of the correct length? -if not len(grad_mrtrix) == num_volumes: - app.error('Number of entries in gradient table does not match number of DWI volumes') -if not len(grad_fsl) == 3 or not len(grad_fsl[0]) == num_volumes: - app.error('Internal error (inconsistent gradient table storage)') + image_dimensions = image.Header(path.from_user(app.ARGS.input, False)).size() + if len(image_dimensions) != 4: + raise MRtrixError('Input image must be a 4D image') + if min(image_dimensions) == 1: + raise MRtrixError('Cannot perform tractography on an image with a unity dimension') + num_volumes = image_dimensions[3] + app.make_scratch_dir() -# Generate a brain mask if we weren't provided with one -# Note that gradient table must be explicitly loaded, since there may not -# be one in the image header (user may be relying on -grad or -fslgrad input options) -if not os.path.exists('mask.mif'): - run.command('dwi2mask data.mif mask.mif -grad grad.b') + # Make sure the image data can be memory-mapped + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('data.mif') + ' -strides 0,0,0,1 -datatype float32') -# How many tracks are we going to generate? -number_option = ' -select ' + str(app.args.number) + if app.ARGS.grad: + shutil.copy(path.from_user(app.ARGS.grad, False), path.to_scratch('grad.b', False)) + elif app.ARGS.fslgrad: + shutil.copy(path.from_user(app.ARGS.fslgrad[0], False), path.to_scratch('bvecs', False)) + shutil.copy(path.from_user(app.ARGS.fslgrad[1], False), path.to_scratch('bvals', False)) + if app.ARGS.mask: + run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') + app.goto_scratch_dir() -# What variations of gradient errors can we conceive? 
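# For reference, the two gradient table layouts handled throughout this script: MRtrix format
# stores one row per volume (x y z b), whereas FSL stores a 3-row bvecs array plus a separate
# bvals row. A simplified converter, ignoring the scanner-space versus image-space distinction
# that mrinfo accounts for when exporting bvecs:
def mrtrix_to_fsl(grad_mrtrix):
  bvecs = [ [ row[axis] for row in grad_mrtrix ] for axis in range(3) ]
  bvals = [ row[3] for row in grad_mrtrix ]
  return bvecs, bvals

grad = [ [0, 0, 0, 0], [1, 0, 0, 1000], [0, 1, 0, 1000] ]
print(mrtrix_to_fsl(grad))   # ([[0, 1, 0], [0, 0, 1], [0, 0, 0]], [0, 1000, 1000])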
+ # Make sure we have gradient table stored externally to header in both MRtrix and FSL formats + if not os.path.isfile('grad.b'): + if os.path.isfile('bvecs'): + run.command('mrinfo data.mif -fslgrad bvecs bvals -export_grad_mrtrix grad.b') + else: + run.command('mrinfo data.mif -export_grad_mrtrix grad.b') -# Done: -# * Has an axis been flipped? (none, 0, 1, 2) -# * Have axes been swapped? (012 021 102 120 201 210) -# * For both flips & swaps, it could occur in either scanner or image space... + if not os.path.isfile('bvecs'): + if os.path.isfile('grad.b'): + run.command('mrinfo data.mif -grad grad.b -export_grad_fsl bvecs bvals') + else: + run.command('mrinfo data.mif -export_grad_fsl bvecs bvals') -# To do: -# * Have the gradients been defined with respect to image space rather than scanner space? -# * After conversion to gradients in image space, are they _then_ defined with respect to scanner space? -# (should the above two be tested independently from the axis flips / permutations?) + # Import both of these into local memory + grad_mrtrix = matrix.load_matrix('grad.b') + grad_fsl = matrix.load_matrix('bvecs') + # Is our gradient table of the correct length? + if not len(grad_mrtrix) == num_volumes: + raise MRtrixError('Number of entries in gradient table does not match number of DWI volumes') + if not len(grad_fsl) == 3 or not len(grad_fsl[0]) == num_volumes: + raise MRtrixError('Internal error (inconsistent gradient table storage)') -axis_flips = [ 'none', 0, 1, 2 ] -axis_permutations = [ ( 0, 1, 2 ), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0) ] -grad_basis = [ 'scanner', 'image' ] -total_tests = len(axis_flips) * len(axis_permutations) * len(grad_basis) + # Generate a brain mask if we weren't provided with one + # Note that gradient table must be explicitly loaded, since there may not + # be one in the image header (user may be relying on -grad or -fslgrad input options) + if not os.path.exists('mask.mif'): + run.command('dwi2mask data.mif mask.mif -grad grad.b') + # How many tracks are we going to generate? + number_option = ' -select ' + str(app.ARGS.number) -# List where the first element is the mean length -lengths = [ ] -progress = app.progressBar('Testing gradient table alterations (0 of ' + str(total_tests) + ')', total_tests) + # What variations of gradient errors can we conceive? -for flip in axis_flips: - for permutation in axis_permutations: - for basis in grad_basis: + # Done: + # * Has an axis been flipped? (none, 0, 1, 2) + # * Have axes been swapped? (012 021 102 120 201 210) + # * For both flips & swaps, it could occur in either scanner or image space... - suffix = '_flip' + str(flip) + '_perm' + ''.join(str(item) for item in permutation) + '_' + basis + # To do: + # * Have the gradients been defined with respect to image space rather than scanner space? + # * After conversion to gradients in image space, are they _then_ defined with respect to scanner space? + # (should the above two be tested independently from the axis flips / permutations?) 
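# A small sketch of how each candidate gradient manipulation tested below is constructed:
# optionally negate one axis, then permute the three direction components, leaving the b-value
# untouched. The function name and inputs here are illustrative only.
def manipulate(grad, flip, permutation):
  out = [ ]
  for row in grad:
    vec = list(row[:3])
    if flip != 'none':               # flip is either 'none' or an axis index (0, 1 or 2)
      vec[flip] = -vec[flip]
    vec = [ vec[permutation[0]], vec[permutation[1]], vec[permutation[2]] ]
    out.append(vec + [ row[3] ])
  return out

print(manipulate([ [0, 1, 0, 1000] ], 1, (0, 2, 1)))   # [[0, 0, -1, 1000]]: +y becomes -z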
- if basis == 'scanner': - grad = copy.copy(grad_mrtrix) + axis_flips = [ 'none', 0, 1, 2 ] + axis_permutations = [ ( 0, 1, 2 ), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0) ] + grad_basis = [ 'scanner', 'image' ] + total_tests = len(axis_flips) * len(axis_permutations) * len(grad_basis) - # Don't do anything if there aren't any axis flips occurring (flip == 'none') - if isinstance(flip, numbers.Number): - multiplier = [ 1.0, 1.0, 1.0, 1.0 ] - multiplier[flip] = -1.0 - grad = [ [ r*m for r,m in zip(row, multiplier) ] for row in grad ] - grad = [ [ row[permutation[0]], row[permutation[1]], row[permutation[2]], row[3] ] for row in grad ] + # List where the first element is the mean length + lengths = [ ] - # Create the gradient table file - grad_path = 'grad' + suffix + '.b' - with open(grad_path, 'w') as f: - for line in grad: - f.write (','.join([str(v) for v in line]) + '\n') + progress = app.ProgressBar('Testing gradient table alterations (0 of ' + str(total_tests) + ')', total_tests) - grad_option = ' -grad ' + grad_path + for flip in axis_flips: + for permutation in axis_permutations: + for basis in grad_basis: - elif basis == 'image': + suffix = '_flip' + str(flip) + '_perm' + ''.join(str(item) for item in permutation) + '_' + basis - grad = copy.copy(grad_fsl) + if basis == 'scanner': - if isinstance(flip, numbers.Number): - grad[flip] = [ -v for v in grad[flip] ] + grad = copy.copy(grad_mrtrix) - grad = [ grad[permutation[0]], grad[permutation[1]], grad[permutation[2]] ] + # Don't do anything if there aren't any axis flips occurring (flip == 'none') + if isinstance(flip, numbers.Number): + multiplier = [ 1.0, 1.0, 1.0, 1.0 ] + multiplier[flip] = -1.0 + grad = [ [ r*m for r,m in zip(row, multiplier) ] for row in grad ] - grad_path = 'bvecs' + suffix - with open(grad_path, 'w') as f: - for line in grad: - f.write (' '.join([str(v) for v in line]) + '\n') + grad = [ [ row[permutation[0]], row[permutation[1]], row[permutation[2]], row[3] ] for row in grad ] - grad_option = ' -fslgrad ' + grad_path + ' bvals' + # Create the gradient table file + grad_path = 'grad' + suffix + '.b' + with open(grad_path, 'w') as grad_file: + for line in grad: + grad_file.write (','.join([str(v) for v in line]) + '\n') - # Run the tracking experiment - run.command('tckgen -algorithm tensor_det data.mif' + grad_option + ' -seed_image mask.mif -mask mask.mif' + number_option + ' -minlength 0 -downsample 5 tracks' + suffix + '.tck') + grad_option = ' -grad ' + grad_path - # Get the mean track length - meanlength=float(run.command('tckstats tracks' + suffix + '.tck -output mean -ignorezero')[0]) + elif basis == 'image': - # Add to the database - lengths.append([meanlength,flip,permutation,basis]) + grad = copy.copy(grad_fsl) - # Increament the progress bar - progress.increment('Testing gradient table alterations (' + str(len(lengths)) + ' of ' + str(total_tests) + ')') + if isinstance(flip, numbers.Number): + grad[flip] = [ -v for v in grad[flip] ] -progress.done() + grad = [ grad[permutation[0]], grad[permutation[1]], grad[permutation[2]] ] -# Sort the list to find the best gradient configuration(s) -lengths.sort() -lengths.reverse() + grad_path = 'bvecs' + suffix + with open(grad_path, 'w') as bvecs_file: + for line in grad: + bvecs_file.write (' '.join([str(v) for v in line]) + '\n') + grad_option = ' -fslgrad ' + grad_path + ' bvals' -# Provide a printout of the mean streamline length of each gradient table manipulation -sys.stderr.write('Mean length Axis flipped Axis permutations Axis basis\n') 
-for line in lengths: - if isinstance(line[1], numbers.Number): - flip_str = "{:4d}".format(line[1]) - else: - flip_str = line[1] - sys.stderr.write("{:5.2f}".format(line[0]) + ' ' + flip_str + ' ' + str(line[2]) + ' ' + line[3] + '\n') + # Run the tracking experiment + run.command('tckgen -algorithm tensor_det data.mif' + grad_option + ' -seed_image mask.mif -mask mask.mif' + number_option + ' -minlength 0 -downsample 5 tracks' + suffix + '.tck') + # Get the mean track length + meanlength=float(run.command('tckstats tracks' + suffix + '.tck -output mean -ignorezero').stdout) -# If requested, extract what has been detected as the best gradient table, and -# export it in the format requested by the user -grad_export_option = '' -if app.args.export_grad_mrtrix: - grad_export_option = ' -export_grad_mrtrix ' + path.fromUser(app.args.export_grad_mrtrix, True) -elif app.args.export_grad_fsl: - grad_export_option = ' -export_grad_fsl ' + path.fromUser(app.args.export_grad_fsl[0], True) + ' ' + path.fromUser(app.args.export_grad_fsl[1], True) -if grad_export_option: - best = lengths[0] - suffix = '_flip' + str(best[1]) + '_perm' + ''.join(str(item) for item in best[2]) + '_' + best[3] - if best[3] == 'scanner': - grad_import_option = ' -grad grad' + suffix + '.b' - elif best[3] == 'image': - grad_import_option = ' -fslgrad bvecs' + suffix + ' bvals' - run.command('mrinfo data.mif' + grad_import_option + grad_export_option + (' -force' if app.forceOverwrite else '')) + # Add to the database + lengths.append([meanlength,flip,permutation,basis]) + # Increament the progress bar + progress.increment('Testing gradient table alterations (' + str(len(lengths)) + ' of ' + str(total_tests) + ')') -app.complete() + progress.done() + + # Sort the list to find the best gradient configuration(s) + lengths.sort() + lengths.reverse() + + + # Provide a printout of the mean streamline length of each gradient table manipulation + sys.stderr.write('Mean length Axis flipped Axis permutations Axis basis\n') + for line in lengths: + if isinstance(line[1], numbers.Number): + flip_str = "{:4d}".format(line[1]) + else: + flip_str = line[1] + sys.stderr.write("{:5.2f}".format(line[0]) + ' ' + flip_str + ' ' + str(line[2]) + ' ' + line[3] + '\n') + + + # If requested, extract what has been detected as the best gradient table, and + # export it in the format requested by the user + grad_export_option = app.read_dwgrad_export_options() + if grad_export_option: + best = lengths[0] + suffix = '_flip' + str(best[1]) + '_perm' + ''.join(str(item) for item in best[2]) + '_' + best[3] + if best[3] == 'scanner': + grad_import_option = ' -grad grad' + suffix + '.b' + elif best[3] == 'image': + grad_import_option = ' -fslgrad bvecs' + suffix + ' bvals' + run.command('mrinfo data.mif' + grad_import_option + grad_export_option, force=app.FORCE_OVERWRITE) + + +# Execute the script +import mrtrix3 +mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/dwiintensitynorm b/bin/dwiintensitynorm deleted file mode 100755 index 046d2d596f..0000000000 --- a/bin/dwiintensitynorm +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env python - - -# Make the corresponding MRtrix3 Python libraries available -import inspect, os, sys -lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib')) -if not os.path.isdir(lib_folder): - sys.stderr.write('Unable to locate MRtrix3 Python libraries') - sys.exit(1) -sys.path.insert(0, lib_folder) - - -from mrtrix3 import app, file, 
image, path, run #pylint: disable=redefined-builtin - -from mrtrix3.path import allindir - -def abspath(*arg): - return os.path.abspath(os.path.join(*arg)) - -def relpath(*arg): - return os.path.relpath(os.path.join(*arg), app.workingDir) - - - -class Input(object): - def __init__(self, filename, prefix, directory, mask_filename = '', mask_directory = ''): - self.filename = filename - self.prefix = prefix - self.directory = directory - self.mask_filename = mask_filename - self.mask_directory = mask_directory - -app.init('David Raffelt (david.raffelt@florey.edu.au)', - 'Performs a global DWI intensity normalisation on a group of subjects using the median b=0 white matter value as the reference') -app.cmdline.addDescription('The white matter mask is estimated from a population average FA template then warped back to each subject to perform the intensity normalisation. Note that bias field correction should be performed prior to this step.') -app.cmdline.add_argument('input_dir', help='The input directory containing all DWI images') -app.cmdline.add_argument('mask_dir', help='Input directory containing brain masks, corresponding to one per input image (with the same file name prefix)') -app.cmdline.add_argument('output_dir', help='The output directory containing all of the intensity normalised DWI images') -app.cmdline.add_argument('fa_template', help='The output population specific FA template, which is threshold to estimate a white matter mask') -app.cmdline.add_argument('wm_mask', help='The output white matter mask (in template space), used to estimate the median b=0 white matter value for normalisation') -options = app.cmdline.add_argument_group('Options for the dwiintensitynorm script') -options.add_argument('-fa_threshold', default='0.4', help='The threshold applied to the Fractional Anisotropy group template used to derive an approximate white matter mask (default: 0.4)') -app.parse() - -app.args.input_dir = relpath(app.args.input_dir) -inputDir = app.args.input_dir -if not os.path.exists(inputDir): - app.error('input directory not found') -inFiles = allindir(inputDir, dir_path=False) -if len(inFiles) <= 1: - app.console('not enough images found in input directory. 
More than one image is needed to perform a group-wise intensity normalisation') -else: - app.console('performing global intensity normalisation on ' + str(len(inFiles)) + ' input images') - -app.args.mask_dir = relpath(app.args.mask_dir) -maskDir = app.args.mask_dir -if not os.path.exists(maskDir): - app.error('mask directory not found') -maskFiles = allindir(maskDir, dir_path=False) -if len(maskFiles) != len(inFiles): - app.error('the number of images in the mask directory does not equal the number of images in the input directory') -maskCommonPostfix = path.commonPostfix(maskFiles) -maskPrefixes = [] -for m in maskFiles: - maskPrefixes.append(m.split(maskCommonPostfix)[0]) - -commonPostfix = path.commonPostfix(inFiles) -input_list = [] -for i in inFiles: - subj_prefix = i.split(commonPostfix)[0] - if subj_prefix not in maskPrefixes: - app.error ('no matching mask image was found for input image ' + i) - image.check3DNonunity(os.path.join(path.fromUser(inputDir, False), i)) - index = maskPrefixes.index(subj_prefix) - input_list.append(Input(i, subj_prefix, path.fromUser(inputDir, False), maskFiles[index], path.fromUser(maskDir, False))) - -app.checkOutputPath(app.args.fa_template) -app.checkOutputPath(app.args.wm_mask) -app.checkOutputPath(app.args.output_dir) -file.makeDir(app.args.output_dir) - -app.makeTempDir() - -maskTempDir = os.path.join(app.tempDir, os.path.basename(os.path.normpath(maskDir))) -run.command('cp -R -L ' + maskDir + ' ' + maskTempDir) - -app.gotoTempDir() - -file.makeDir('fa') -progress = app.progressBar('Computing FA images', len(input_list)) -for i in input_list: - run.command('dwi2tensor ' + abspath(i.directory, i.filename) + ' -mask ' + abspath(i.mask_directory, i.mask_filename) + ' - | tensor2metric - -fa ' + os.path.join('fa', i.prefix + '.mif')) - progress.increment() -progress.done() - -app.console('Generating FA population template') -run.command('population_template fa -mask_dir ' + maskTempDir + ' fa_template.mif -type rigid_affine_nonlinear -rigid_scale 0.25,0.5,0.8,1.0 -affine_scale 0.7,0.8,1.0,1.0 -nl_scale 0.5,0.75,1.0,1.0,1.0 -nl_niter 5,5,5,5,5 -tempdir population_template -linear_no_pause -nocleanup') - -app.console('Generating WM mask in template space') -run.command('mrthreshold fa_template.mif -abs ' + app.args.fa_threshold + ' template_wm_mask.mif') - -progress = app.progressBar('Intensity normalising subject images', len(input_list)) -file.makeDir('wm_mask_warped') -for i in input_list: - run.command('mrtransform template_wm_mask.mif -interp nearest -warp_full ' + os.path.join('population_template', 'warps', i.prefix + '.mif') + ' ' + os.path.join('wm_mask_warped', i.prefix + '.mif') + ' -from 2 -template ' + os.path.join('fa', i.prefix + '.mif')) - run.command('dwinormalise ' + abspath(i.directory, i.filename) + ' ' + os.path.join('wm_mask_warped', i.prefix + '.mif') + ' ' + path.fromUser(os.path.join(app.args.output_dir, i.filename), True) + (' -force' if app.forceOverwrite else '')) - progress.increment() -progress.done() - -app.console('Exporting template images to user locations') -run.command('mrconvert template_wm_mask.mif ' + path.fromUser(app.args.wm_mask, True) + (' -force' if app.forceOverwrite else '')) -run.command('mrconvert fa_template.mif ' + path.fromUser(app.args.fa_template, True) + (' -force' if app.forceOverwrite else '')) - -app.complete() diff --git a/bin/dwinormalise b/bin/dwinormalise new file mode 100755 index 0000000000..114e8f1ac1 --- /dev/null +++ b/bin/dwinormalise @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +# 
Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +# Script that performs intensity normalisation of DWIs in various ways + + +def usage(cmdline): #pylint: disable=unused-variable + from mrtrix3 import algorithm #pylint: disable=no-name-in-module, import-outside-toplevel + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Perform various forms of intensity normalisation of DWIs') + cmdline.add_description('This script provides access to different techniques for globally scaling the intensity of diffusion-weighted images. ' + 'The different algorithms have different purposes, and different requirements with respect to the data with which they must be provided & will produce as output. ' + 'Further information on the individual algorithms available can be accessed via their individual help pages; eg. "dwinormalise group -help".') + + # Import the command-line settings for all algorithms found in the relevant directory + algorithm.usage(cmdline) + + + +def execute(): #pylint: disable=unused-variable + from mrtrix3 import algorithm, app #pylint: disable=no-name-in-module, import-outside-toplevel + + # Find out which algorithm the user has requested + alg = algorithm.get_module(app.ARGS.algorithm) + alg.check_output_paths() + + # From here, the script splits depending on what algorithm is being used + alg.execute() + + + + + +# Execute the script +import mrtrix3 +mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/dwipreproc b/bin/dwipreproc deleted file mode 100755 index 00e39ad16a..0000000000 --- a/bin/dwipreproc +++ /dev/null @@ -1,1133 +0,0 @@ -#!/usr/bin/env python - -# Script for performing DWI pre-processing using FSL 5.0 tools eddy / topup / applytopup - -# This script is generally one of the first operations that will be applied to diffusion image data. The precise details of how this image pre-processing takes place depends heavily on the DWI acquisition; specifically, the presence or absence of reversed phase-encoding data for the purposes of EPI susceptibility distortion correction. - -# The script is capable of handling a wide range of DWI acquisitions with respect to the design of phase encoding directions. This is dependent upon information regarding the phase encoding being embedded within theimage headers. The relevant information should be captured by MRtrix when importing DICOM images; it should also be the case for BIDS-compatible datasets. If the user expects this information to be present within the image headers, the -rpe_header option must be specified. - -# If however such information is not present in the image headers, then it is also possible for the user to manually specify the relevant information regarding phase encoding. This involves the following information: -# * The fundamental acquisition protocol design regarding phase encoding. 
There are three common acquisition designs that are supported: -# 1. All DWI volumes acquired using the same phase encode parameters, and no additional volumes acquired for the purpose of estimating the inhomogeneity field. In this case, eddy will only perform motion and eddy current distortion correction. This configuration is specified using the -rpe_none option. -# 2. All DWI volumes acquired using the same phase encode parameters; but for the purpose of estimating the inhomogeneity field (and subsequently correcting the resulting distortions in the DWIs), an additional pair (or multiple pairs) of image volumes are acquired, where the first volume(s) has the same phase encoding parameters as the input DWI series, and the second volume(s) has precisely the opposite phase encoding. This configuration is specified using the -rpe_pair option; and the user must additionally provide those images to be used for field estimation using the -se_epi option. -# 3. Every DWI gradient direction is acquired twice: once with one phase encoding configuration, and again using the opposite phase encode direction. The goal here is to combine each pair of images into a single DWI volume per gradient direction, where that recombination takes advantage of the information gained from having two volumes where the signal is distorted in opposite directions in the presence of field inhomogeneity. -# * The (primary) direction of phase encoding. In cases where opposing phase encoding is part of the acquisition protocol (i.e. the reversed phase-encode pair in case 2 above, and all of the DWIs in case 3 above), the -pe_dir option specifies the phase encode direction of the _first_ volume in the relevant volume pair; the second is assumed to be the exact opposite. -# * The total readout time of the EPI acquisition. This affects the magnitude of the image distortion for a given field inhomogeneity. If this information is not provided via the -readout_time option, then a 'sane' default of 0.1s will be assumed. Note that this is not actually expected to influence the estimation of the field; it will result in the field inhomogeneity estimation being scaled by some factor, but as long as it uses the same sane default for the DWIs, the distortion correction should operate as expected. - -# Make the corresponding MRtrix3 Python libraries available -import inspect, os, sys -lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib')) -if not os.path.isdir(lib_folder): - sys.stderr.write('Unable to locate MRtrix3 Python libraries') - sys.exit(1) -sys.path.insert(0, lib_folder) - -import math, itertools, shutil, subprocess -from mrtrix3 import app, file, fsl, image, path, phaseEncoding, run #pylint: disable=redefined-builtin - - -app.init ('Robert E. Smith (robert.smith@florey.edu.au)', - 'Perform diffusion image pre-processing using FSL\'s eddy tool; including inhomogeneity distortion correction using FSL\'s topup tool if possible') -app.cmdline.addDescription('Note that this script does not perform any explicit registration between images provided to topup via the -se_epi option, and the DWI volumes provided to eddy. In some instances (motion between acquisitions) this can result in erroneous application of the inhomogeneity field during distortion correction. If this could potentially be a problem for your data, a possible solution is to insert the first b=0 DWI volume to be the first volume of the image file provided via the -se_epi option. 
This will hopefully be addressed within the script itself in a future update.') -app.cmdline.addCitation('', 'Andersson, J. L. & Sotiropoulos, S. N. An integrated approach to correction for off-resonance effects and subject movement in diffusion MR imaging. NeuroImage, 2015, 125, 1063-1078', True) -app.cmdline.addCitation('', 'Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', True) -app.cmdline.addCitation('If performing recombination of diffusion-weighted volume pairs with opposing phase encoding directions', 'Skare, S. & Bammer, R. Jacobian weighting of distortion corrected EPI data. Proceedings of the International Society for Magnetic Resonance in Medicine, 2010, 5063', True) -app.cmdline.addCitation('If performing EPI susceptibility distortion correction', 'Andersson, J. L.; Skare, S. & Ashburner, J. How to correct susceptibility distortions in spin-echo echo-planar images: application to diffusion tensor imaging. NeuroImage, 2003, 20, 870-888', True) -app.cmdline.addCitation('If including "--repol" in -eddy_options input', 'Andersson, J. L. R.; Graham, M. S.; Zsoldos, E. & Sotiropoulos, S. N. Incorporating outlier detection and replacement into a non-parametric framework for movement and distortion correction of diffusion MR images. NeuroImage, 2016, 141, 556-572', True) -app.cmdline.addCitation('If including "--mporder" in -eddy_options input', 'Andersson, J. L. R.; Graham, M. S.; Drobnjak, I.; Zhang, H.; Filippini, N. & Bastiani, M. Towards a comprehensive framework for movement and distortion correction of diffusion MR images: Within volume movement. NeuroImage, 2017, 152, 450-466', True) -app.cmdline.add_argument('input', help='The input DWI series to be corrected') -app.cmdline.add_argument('output', help='The output corrected image series') -grad_export_options = app.cmdline.add_argument_group('Options for exporting the diffusion gradient table') -grad_export_options.add_argument('-export_grad_mrtrix', metavar='grad', help='Export the final gradient table in MRtrix format') -grad_export_options.add_argument('-export_grad_fsl', nargs=2, metavar=('bvecs', 'bvals'), help='Export the final gradient table in FSL bvecs/bvals format') -app.cmdline.flagMutuallyExclusiveOptions( [ 'export_grad_mrtrix', 'export_grad_fsl' ] ) -grad_import_options = app.cmdline.add_argument_group('Options for importing the diffusion gradient table') -grad_import_options.add_argument('-grad', help='Provide a gradient table in MRtrix format') -grad_import_options.add_argument('-fslgrad', nargs=2, metavar=('bvecs', 'bvals'), help='Provide a gradient table in FSL bvecs/bvals format') -app.cmdline.flagMutuallyExclusiveOptions( [ 'grad', 'fslgrad' ] ) -general_options = app.cmdline.add_argument_group('Other options for the dwipreproc script') -general_options.add_argument('-pe_dir', metavar=('PE'), help='Manually specify the phase encoding direction of the input series; can be a signed axis number (e.g. -0, 1, +2), an axis designator (e.g. RL, PA, IS), or NIfTI axis codes (e.g. 
i-, j, k)') -general_options.add_argument('-readout_time', metavar=('time'), type=float, help='Manually specify the total readout time of the input series (in seconds)') -general_options.add_argument('-se_epi', metavar=('image'), help='Provide an additional image series consisting of spin-echo EPI images, which is to be used exclusively by topup for estimating the inhomogeneity field (i.e. it will not form part of the output image series)') -general_options.add_argument('-align_seepi', action='store_true', help='Achieve alignment between the SE-EPI images used for inhomogeneity field estimation, and the DWIs, by inserting the first DWI b=0 volume to the SE-EPI series. Only use this option if the input SE-EPI images have identical image contrast to the b=0 images present in the DWI series.') -general_options.add_argument('-json_import', metavar=('file'), help='Import image header information from an associated JSON file (may be necessary to determine phase encoding information)') -general_options.add_argument('-topup_options', metavar=('TopupOptions'), help='Manually provide additional command-line options to the topup command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to topup)') -general_options.add_argument('-eddy_options', metavar=('EddyOptions'), help='Manually provide additional command-line options to the eddy command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to eddy)') -general_options.add_argument('-eddyqc_text', metavar=('directory'), help='Copy the various text-based statistical outputs generated by eddy into an output directory') -general_options.add_argument('-eddyqc_all', metavar=('directory'), help='Copy ALL outputs generated by eddy (including images) into an output directory') -rpe_options = app.cmdline.add_argument_group('Options for specifying the acquisition phase-encoding design; note that one of the -rpe_* options MUST be provided') -rpe_options.add_argument('-rpe_none', action='store_true', help='Specify that no reversed phase-encoding image data is being provided; eddy will perform eddy current and motion correction only') -rpe_options.add_argument('-rpe_pair', action='store_true', help='Specify that a set of images (typically b=0 volumes) will be provided for use in inhomogeneity field estimation only (using the -se_epi option). It is assumed that the FIRST volume(s) of this image has the SAME phase-encoding direction as the input DWIs, and the LAST volume(s) has precisely the OPPOSITE phase encoding') -rpe_options.add_argument('-rpe_all', action='store_true', help='Specify that ALL DWIs have been acquired with opposing phase-encoding; this information will be used to perform a recombination of image volumes (each pair of volumes with the same b-vector but different phase encoding directions will be combined together into a single volume). 
It is assumed that the SECOND HALF of the volumes in the input DWIs have corresponding diffusion sensitisation directions to the FIRST HALF, but were acquired using precisely the opposite phase-encoding direction') -rpe_options.add_argument('-rpe_header', action='store_true', help='Specify that the phase-encoding information can be found in the image header(s), and that this is the information that the script should use') -app.cmdline.flagMutuallyExclusiveOptions( [ 'rpe_none', 'rpe_pair', 'rpe_all', 'rpe_header' ], True ) -app.cmdline.flagMutuallyExclusiveOptions( [ 'rpe_none', 'se_epi' ], False ) # May still technically provide -se_epi even with -rpe_all -app.cmdline.flagMutuallyExclusiveOptions( [ 'rpe_header', 'pe_dir' ], False ) # Can't manually provide phase-encoding direction if expecting it to be in the header -app.cmdline.flagMutuallyExclusiveOptions( [ 'rpe_header', 'readout_time' ], False ) # Can't manually provide readout time if expecting it to be in the header -app.cmdline.flagMutuallyExclusiveOptions( [ 'eddyqc_text', 'eddyqc_all' ], False ) -app.parse() - - -if app.isWindows(): - app.error('Script cannot run on Windows due to FSL dependency') - -image.check3DNonunity(path.fromUser(app.args.input, False)) - -PE_design = '' -if app.args.rpe_none: - PE_design = 'None' -elif app.args.rpe_pair: - PE_design = 'Pair' - if not app.args.se_epi: - app.error('If using the -rpe_pair option, the -se_epi option must be used to provide the spin-echo EPI data to be used by topup') -elif app.args.rpe_all: - PE_design = 'All' -elif app.args.rpe_header: - PE_design = 'Header' -else: - app.error('Must explicitly specify phase-encoding acquisition design (even if none)') - -if app.args.align_seepi and not app.args.se_epi: - app.error('-align_seepi option is only applicable when the -se_epi option is also used') - -fsl_path = os.environ.get('FSLDIR', '') -if not fsl_path: - app.error('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') - -if not PE_design == 'None': - topup_config_path = os.path.join(fsl_path, 'etc', 'flirtsch', 'b02b0.cnf') - if not os.path.isfile(topup_config_path): - app.error('Could not find necessary default config file for FSL topup command\n(expected location: ' + topup_config_path + ')') - topup_cmd = fsl.exeName('topup') - applytopup_cmd = fsl.exeName('applytopup') - -if not fsl.eddyBinary(True) and not fsl.eddyBinary(False): - app.error('Could not find any version of FSL eddy command') -fsl_suffix = fsl.suffix() -app.checkOutputPath(app.args.output) - -# Export the gradient table to the path requested by the user if necessary -grad_export_option = '' -if app.args.export_grad_mrtrix: - grad_export_option = ' -export_grad_mrtrix ' + path.fromUser(app.args.export_grad_mrtrix, True) - app.checkOutputPath(path.fromUser(app.args.export_grad_mrtrix, False)) -elif app.args.export_grad_fsl: - grad_export_option = ' -export_grad_fsl ' + path.fromUser(app.args.export_grad_fsl[0], True) + ' ' + path.fromUser(app.args.export_grad_fsl[1], True) - app.checkOutputPath(path.fromUser(app.args.export_grad_fsl[0], False)) - app.checkOutputPath(path.fromUser(app.args.export_grad_fsl[1], False)) - - -eddyqc_path = None -eddyqc_files = [ 'eddy_parameters', 'eddy_movement_rms', 'eddy_restricted_movement_rms', \ - 'eddy_post_eddy_shell_alignment_parameters', 'eddy_post_eddy_shell_PE_translation_parameters', \ - 'eddy_outlier_report', 'eddy_outlier_map', 'eddy_outlier_n_stdev_map', 'eddy_outlier_n_sqr_stdev_map', \ - 'eddy_movement_over_time' ] 
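# ------------------------------------------------------------------------------
# Illustrative sketch (not part of this patch): the two gradient-table layouts that
# the -export_grad_mrtrix / -export_grad_fsl handling above refers to. An MRtrix
# table stores one row per volume ([x y z b]), whereas FSL splits the same data into
# a bvecs file (3 rows by N columns) and a bvals file (1 row of N columns). The toy
# conversion below shows the layout difference only; it deliberately ignores the
# scanner-space vs. image-space direction conventions, which the MRtrix commands
# handle when performing the real export.
# ------------------------------------------------------------------------------
def mrtrix_grad_to_fsl(grad_rows):
  # grad_rows: list of [x, y, z, b] entries, one per DWI volume
  bvecs = '\n'.join(' '.join(str(row[axis]) for row in grad_rows) for axis in range(3))
  bvals = ' '.join(str(row[3]) for row in grad_rows)
  return bvecs, bvals

# e.g. two b=0 volumes plus one b=1000 volume along +x:
example_bvecs, example_bvals = mrtrix_grad_to_fsl([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1000]])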
-eddyqc_images = [ 'eddy_outlier_free_data', 'eddy_cnr_maps', 'eddy_residuals' ] -if app.args.eddyqc_text: - eddyqc_path = path.fromUser(app.args.eddyqc_text, False) -elif app.args.eddyqc_all: - eddyqc_path = path.fromUser(app.args.eddyqc_all, False) -if eddyqc_path: - if os.path.exists(eddyqc_path): - if os.path.isdir(eddyqc_path): - if any([ os.path.exists(os.path.join(eddyqc_path, filename)) for filename in eddyqc_files ]): - if app.forceOverwrite: - app.warn('Output eddy QC directory already contains relevant files; these will be overwritten on completion') - else: - app.error('Output eddy QC directory already contains relevant files (use -force to override)') - else: - if app.forceOverwrite: - app.warn('Target for eddy QC output is not a directory; it will be overwritten on completion') - else: - app.error('Target for eddy QC output exists, and is not a directory (use -force to override)') - - -eddy_manual_options = [] -if app.args.eddy_options: - # Initially process as a list; we'll convert back to a string later - eddy_manual_options = app.args.eddy_options.strip().split() - - -# Convert all input images into MRtrix format and store in temprary directory first -app.makeTempDir() - -grad_option = '' -if app.args.grad: - grad_option = ' -grad ' + path.fromUser(app.args.grad, True) -elif app.args.fslgrad: - grad_option = ' -fslgrad ' + path.fromUser(app.args.fslgrad[0], True) + ' ' + path.fromUser(app.args.fslgrad[1], True) -json_option = '' -if app.args.json_import: - json_option = ' -json_import ' + path.fromUser(app.args.json_import, True) -run.command('mrconvert ' + path.fromUser(app.args.input, True) + ' ' + path.toTemp('dwi.mif', True) + grad_option + json_option) -if app.args.se_epi: - image.check3DNonunity(path.fromUser(app.args.se_epi, False)) - run.command('mrconvert ' + path.fromUser(app.args.se_epi, True) + ' ' + path.toTemp('se_epi.mif', True)) - -app.gotoTempDir() - - -# Get information on the input images, and check their validity -dwi_header = image.Header('dwi.mif') -if not len(dwi_header.size()) == 4: - app.error('Input DWI must be a 4D image') -dwi_num_volumes = dwi_header.size()[3] -app.var(dwi_num_volumes) -dwi_num_slices = dwi_header.size()[2] -app.var(dwi_num_slices) -dwi_pe_scheme = phaseEncoding.getScheme(dwi_header) -if app.args.se_epi: - se_epi_header = image.Header('se_epi.mif') - # This doesn't necessarily apply any more: May be able to combine e.g. 
a P>>A from -se_epi with an A>>P b=0 image from the DWIs -# if not len(se_epi_header.size()) == 4: -# app.error('File provided using -se_epi option must contain more than one image volume') - se_epi_pe_scheme = phaseEncoding.getScheme(se_epi_header) -if 'dw_scheme' not in dwi_header.keyval(): - app.error('No diffusion gradient table found') -grad = dwi_header.keyval()['dw_scheme'] -if not len(grad) == dwi_num_volumes: - app.error('Number of lines in gradient table (' + str(len(grad)) + ') does not match input image (' + str(dwi_num_volumes) + ' volumes); check your input data') - -# Check the manual options being passed to eddy, ensure they make sense -eddy_mporder = any(s.startswith('--mporder') for s in eddy_manual_options) -if eddy_mporder: - if 'SliceEncodingDirection' in dwi_header.keyval(): - slice_encoding_direction = dwi_header.keyval()['SliceEncodingDirection'] - app.var(slice_encoding_direction) - if not slice_encoding_direction.startswith('k'): - app.error('DWI header indicates that 3rd spatial axis is not the slice axis; this is not yet compatible with --mporder option in eddy, nor supported in dwipreproc') - slice_encoding_direction = image.axis2dir(slice_encoding_direction) - else: - app.console('No slice encoding direction information present; assuming third axis corresponds to slices') - slice_encoding_direction = [0,0,1] -if '--resamp=lsr' in eddy_manual_options: - app.error('dwipreproc does not currently support least-squares reconstruction; this cannot be simply passed via -eddy_options') -if eddy_mporder: - slspec_option = [ s for s in eddy_manual_options if s.startswith('--slspec') ] - slice_groups = [ ] - slice_timing = [ ] - if len(slspec_option) > 1: - app.error('--slspec option appears more than once in -eddy_options input; cannot import slice timing') - elif len(slspec_option) == 1: - slspec_file_path = path.fromUser(slspec_option[0][9:], False) - if os.path.isfile(slspec_file_path): - # Since there's a chance that we may need to pad this info, we can't just copy this file - # to the temporary directory... - with open(slspec_file_path, 'r') as f: - for line in f: - line = line.strip() - if line: - slice_groups.append([int(value) for value in line.split()]) - app.var(slice_groups) - # Remove this entry from eddy_manual_options; it'll be inserted later, with the - # path to the new slspec file - eddy_manual_options = [ s for s in eddy_manual_options if not s.startswith('--slspec') ] - else: - app.error('Unable to find \'slspec\' file provided via -eddy_options \" ... --slspec=/path/to/file ... 
\" (expected location: ' + slspec_file_path + ')') - else: - if 'SliceTiming' not in dwi_header.keyval(): - app.error('Cannot perform slice-to-volume correction in eddy: No slspec file provided, and no slice timing information present in header') - slice_timing = dwi_header.keyval()['SliceTiming'][0] - app.var(slice_timing) - if len(slice_timing) != dwi_num_slices: - app.error('Cannot use slice timing information in image header for slice-to-volume correction: Number of entries (' + len(slice_timing) + ') does not match number of slices (' + dwi_header.size()[2] + ')') - -# Use new features of dirstat to query the quality of the diffusion acquisition scheme -# Need to know the mean b-value in each shell, and the asymmetry value of each shell -# But don't bother testing / warning the user if they're already controlling for this -if not app.args.eddy_options or not any(s.startswith('--slm=') for s in app.args.eddy_options.split()): - shell_bvalues = [ int(round(float(value))) for value in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] - shell_asymmetry = [ float(value) for value in run.command('dirstat dwi.mif -output asym')[0].splitlines() ] - # dirstat will skip any b=0 shell by default; therefore for correspondence between - # shell_bvalues and shell_symmetry, need to remove any b=0 from the former - if len(shell_bvalues) == len(shell_asymmetry) + 1: - shell_bvalues = shell_bvalues[1:] - elif len(shell_bvalues) != len(shell_asymmetry): - app.error('Number of b-values reported by mrinfo (' + str(len(shell_bvalues)) + ') does not match number of outputs provided by dirstat (' + str(len(shell_asymmetry)) + ')') - for b, s in zip(shell_bvalues, shell_asymmetry): - if s >= 0.1: - app.warn('sampling of b=' + str(b) + ' shell is ' + ('strongly' if s >= 0.4 else 'moderately') + \ - ' asymmetric; distortion correction may benefit from use of: ' + \ - '-eddy_options " ... --slm=linear ... 
"') - - -# Since we want to access user-defined phase encoding information regardless of whether or not -# such information is present in the header, let's grab it here -manual_pe_dir = None -if app.args.pe_dir: - manual_pe_dir = [ float(i) for i in phaseEncoding.direction(app.args.pe_dir) ] -app.var(manual_pe_dir) -manual_trt = None -if app.args.readout_time: - manual_trt = float(app.args.readout_time) -app.var(manual_trt) - - -do_topup = (not PE_design == 'None') - - -def grads_match(one, two): - # Dot product between gradient directions - # First, need to check for zero-norm vectors: - # - If both are zero, skip this check - # - If one is zero and the other is not, volumes don't match - # - If neither is zero, test the dot product - if any(one[0:3]): - if not any(two[0:3]): - return False - dot_product = one[0]*two[0] + one[1]*two[1] + one[2]*two[2] - if abs(dot_product) < 0.999: - return False - elif any(two[0:3]): - return False - # b-value - if abs(one[3]-two[3]) > 10.0: - return False - return True - - -# Manually generate a phase-encoding table for the input DWI based on user input -dwi_manual_pe_scheme = None -se_epi_manual_pe_scheme = None -auto_trt = 0.1 -dwi_auto_trt_warning = False -if manual_pe_dir: - - if manual_trt: - trt = manual_trt - else: - trt = auto_trt - dwi_auto_trt_warning = True - - # Still construct the manual PE scheme even with 'None' or 'Pair': - # there may be information in the header that we need to compare against - if PE_design == 'None': - line = list(manual_pe_dir) - line.append(trt) - dwi_manual_pe_scheme = [ line ] * dwi_num_volumes - app.var(dwi_manual_pe_scheme) - - # With 'Pair', also need to construct the manual scheme for SE EPIs - elif PE_design == 'Pair': - line = list(manual_pe_dir) - line.append(trt) - dwi_manual_pe_scheme = [ line ] * dwi_num_volumes - app.var(dwi_manual_pe_scheme) - se_epi_num_volumes = se_epi_header.size()[3] - if se_epi_num_volumes%2: - app.error('If using -rpe_pair option, image provided using -se_epi must contain an even number of volumes') - # Assume that first half of volumes have same direction as series; - # second half have the opposite direction - se_epi_manual_pe_scheme = [ line ] * int(se_epi_num_volumes/2) - line = [ (-i if i else 0.0) for i in manual_pe_dir ] - line.append(trt) - se_epi_manual_pe_scheme.extend( [ line ] * int(se_epi_num_volumes/2) ) - app.var(se_epi_manual_pe_scheme) - - # If -rpe_all, need to scan through grad and figure out the pairings - # This will be required if relying on user-specified phase encode direction - # It will also be required at the end of the script for the manual recombination - # Update: The possible permutations of volume-matched acquisition is limited within the - # context of the -rpe_all option. In particular, the potential for having more - # than one b=0 volume within each half means that it is not possible to permit - # arbitrary ordering of those pairs, since b=0 volumes would then be matched - # despite having the same phase-encoding direction. Instead, explicitly enforce - # that volumes must be matched between the first and second halves of the DWI data. 
- elif PE_design == 'All': - if dwi_num_volumes%2: - app.error('If using -rpe_all option, input image must contain an even number of volumes') - grads_matched = [ dwi_num_volumes ] * dwi_num_volumes - grad_pairs = [ ] - app.debug('Commencing gradient direction matching; ' + str(dwi_num_volumes) + ' volumes') - for index1 in range(int(dwi_num_volumes/2)): - if grads_matched[index1] == dwi_num_volumes: # As yet unpaired - for index2 in range(int(dwi_num_volumes/2), dwi_num_volumes): - if grads_matched[index2] == dwi_num_volumes: # Also as yet unpaired - if grads_match(grad[index1], grad[index2]): - grads_matched[index1] = index2 - grads_matched[index2] = index1 - grad_pairs.append([index1, index2]) - app.debug('Matched volume ' + str(index1) + ' with ' + str(index2) + ': ' + str(grad[index1]) + ' ' + str(grad[index2])) - break - else: - app.error('Unable to determine matching reversed phase-encode direction volume for DWI volume ' + str(index1)) - if not len(grad_pairs) == dwi_num_volumes/2: - app.error('Unable to determine complete matching DWI volume pairs for reversed phase-encode combination') - # Construct manual PE scheme here: - # Regardless of whether or not there's a scheme in the header, need to have it: - # if there's one in the header, want to compare to the manually-generated one - dwi_manual_pe_scheme = [ ] - for index in range(0, dwi_num_volumes): - line = list(manual_pe_dir) - if index >= int(dwi_num_volumes/2): - line = [ (-i if i else 0.0) for i in line ] - line.append(trt) - dwi_manual_pe_scheme.append(line) - app.var(dwi_manual_pe_scheme) - -else: # No manual phase encode direction defined - - if not PE_design == 'Header': - app.error('If not using -rpe_header, phase encoding direction must be provided using the -pe_dir option') - - - -def scheme_dirs_match(one, two): - for line_one, line_two in zip(one, two): - if not line_one[0:3] == line_two[0:3]: - return False - return True - -def scheme_times_match(one, two): - for line_one, line_two in zip(one, two): - if abs(line_one[3] - line_two[3]) > 5e-3: - return False - return True - - - -# Determine whether or not the phase encoding table generated manually should be used -# (possibly instead of a table present in the image header) -overwrite_dwi_pe_scheme = False -if dwi_pe_scheme: - if manual_pe_dir: - # Compare manual specification to that read from the header; - # overwrite & give warning to user if they differ - # Bear in mind that this could even be the case for -rpe_all; - # relying on earlier code having successfully generated the 'appropriate' - # PE scheme for the input volume based on the diffusion gradient table - if not scheme_dirs_match(dwi_pe_scheme, dwi_manual_pe_scheme): - app.warn('User-defined phase-encoding direction design does not match what is stored in DWI image header; proceeding with user specification') - overwrite_dwi_pe_scheme = True - if manual_trt: - # Compare manual specification to that read from the header - if not scheme_times_match(dwi_pe_scheme, dwi_manual_pe_scheme): - app.warn('User-defined total readout time does not match what is stored in DWI image header; proceeding with user specification') - overwrite_dwi_pe_scheme = True - if overwrite_dwi_pe_scheme: - dwi_pe_scheme = dwi_manual_pe_scheme # May be used later for triggering volume recombination - else: - dwi_manual_pe_scheme = None # To guarantee that these generated data are never used -else: - # Nothing in the header; rely entirely on user specification - if PE_design == 'Header': - app.error('No phase encoding information 
found in DWI image header') - if not manual_pe_dir: - app.error('No phase encoding information provided either in header or at command-line') - if dwi_auto_trt_warning: - app.console('Total readout time not provided at command-line; assuming sane default of ' + str(auto_trt)) - dwi_pe_scheme = dwi_manual_pe_scheme # May be needed later for triggering volume recombination - -# This may be required by -rpe_all for extracting b=0 volumes while retaining phase-encoding information -import_dwi_pe_table_option = '' -if dwi_manual_pe_scheme: - with open('dwi_manual_pe_scheme.txt', 'w') as f: - for line in dwi_manual_pe_scheme: - f.write(' '.join( [ str(value) for value in line ] ) + '\n') - import_dwi_pe_table_option = ' -import_pe_table dwi_manual_pe_scheme.txt' - - -# Find the index of the first DWI volume that is a b=0 volume -# This needs to occur at the outermost loop as it is pertinent information -# not only for the -align_seepi option, but also for when the -se_epi option -# is not provided at all, and the input top topup is extracted solely from the DWIs -bzero_threshold = 10.0 -if 'BZeroThreshold' in app.config: - bzero_threshold = float(app.config['BZeroThreshold']) -dwi_first_bzero_index = 0 -for line in grad: - if line[3] <= bzero_threshold: - break - dwi_first_bzero_index += 1 -app.var(dwi_first_bzero_index) - - -# Deal with the phase-encoding of the images to be fed to topup (if applicable) -overwrite_se_epi_pe_scheme = False -se_epi_path = 'se_epi.mif' -dwi_permute_volumes_pre_eddy_option = '' -dwi_permute_volumes_post_eddy_option = '' -dwi_bzero_added_to_se_epi = False -if app.args.se_epi: - - # Newest version of eddy requires that topup field be on the same grid as the eddy input DWI - if not image.match(dwi_header, se_epi_header, 3): - # If the number of voxels and voxel size are equivalent, but the header - # transformation is not, let's replace the header transformation of the - # SE-EPI image with that of the DWIs, so that the grids do overlap - # But only do this if we are using the -align_seepi option; - # in that case we're essentially rigid transforming the provided - # SE-EPI data according to the shift in the FoV. 
If this option is - # _not_ used, then we can only guess that the real-space position of - # the subject has not moved, and therefore an explicit re-grid is necessary - if app.args.align_seepi and image.match(dwi_header, se_epi_header, 3, False): - app.console('SE-EPI images used for inhomogeneity field estimation have a ' - 'different FoV placement to the DWIs, but voxel size / dimensions are identical; ' - 'replacing header transformation with that of DWIs') - new_se_epi_path = 'se_epi_transform.mif' - run.command('mrtransform ' + se_epi_path + ' -replace dwi.mif ' + new_se_epi_path) - else: - app.console('DWIs and SE-EPI images used for inhomogeneity field estimation are defined on different image grids; ' - 'the latter will be automatically re-gridded to match the former') - new_se_epi_path = 'se_epi_regrid.mif' - run.command('mrtransform ' + se_epi_path + ' - -interp sinc -template dwi.mif | mrcalc - 0.0 -max ' + new_se_epi_path) - file.delTemporary(se_epi_path) - se_epi_path = new_se_epi_path - se_epi_header = image.Header(se_epi_path) - - - # 3 possible sources of PE information: DWI header, topup image header, command-line - # Any pair of these may conflict, and any one could be absent - - # Have to switch here based on phase-encoding acquisition design - if PE_design == 'Pair': - # Criteria: - # * If present in own header, ignore DWI header entirely - - # - If also provided at command-line, look for conflict & report - # - If not provided at command-line, nothing to do - # * If _not_ present in own header: - # - If provided at command-line, infer appropriately - # - If not provided at command-line, but the DWI header has that information, infer appropriately - if se_epi_pe_scheme: - if manual_pe_dir: - if not scheme_dirs_match(se_epi_pe_scheme, se_epi_manual_pe_scheme): - app.warn('User-defined phase-encoding direction design does not match what is stored in SE EPI image header; proceeding with user specification') - overwrite_se_epi_pe_scheme = True - if manual_trt: - if not scheme_times_match(se_epi_pe_scheme, se_epi_manual_pe_scheme): - app.warn('User-defined total readout time does not match what is stored in SE EPI image header; proceeding with user specification') - overwrite_se_epi_pe_scheme = True - if overwrite_se_epi_pe_scheme: - se_epi_pe_scheme = se_epi_manual_pe_scheme - else: - se_epi_manual_pe_scheme = None # To guarantee that these data are never used - else: - overwrite_se_epi_pe_scheme = True - se_epi_pe_scheme = se_epi_manual_pe_scheme - - elif PE_design == 'All': - # Criteria: - # * If present in own header: - # - Nothing to do - # * If _not_ present in own header: - # - Don't have enough information to proceed - # - Is this too harsh? (e.g. 
Have rules by which it may be inferred from the DWI header / command-line) - if not se_epi_pe_scheme: - app.error('If explicitly including SE EPI images when using -rpe_all option, they must come with their own associated phase-encoding information in the image header') - - elif PE_design == 'Header': - # Criteria: - # * If present in own header: - # Nothing to do (-pe_dir option is mutually exclusive) - # * If _not_ present in own header: - # Cannot proceed - if not se_epi_pe_scheme: - app.error('No phase-encoding information present in SE-EPI image header') - # If there is no phase encoding contrast within the SE-EPI series, - # try combining it with the DWI b=0 volumes, see if that produces some contrast - # However, this should probably only be permitted if the -align_seepi option is defined - se_epi_pe_scheme_has_contrast = 'pe_scheme' in se_epi_header.keyval() - if not se_epi_pe_scheme_has_contrast: - if app.args.align_seepi: - app.console('No phase-encoding contrast present in SE-EPI images; will examine again after combining with DWI b=0 images') - new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_dwibzeros.mif' - # Don't worry about trying to produce a balanced scheme here - run.command('dwiextract dwi.mif - -bzero | mrcat - ' + se_epi_path + ' ' + new_se_epi_path + ' -axis 3') - se_epi_header = image.Header(new_se_epi_path) - se_epi_pe_scheme_has_contrast = 'pe_scheme' in se_epi_header.keyval() - if se_epi_pe_scheme_has_contrast: - file.delTemporary(se_epi_path) - se_epi_path = new_se_epi_path - se_epi_pe_scheme = phaseEncoding.getScheme(se_epi_header) - dwi_bzero_added_to_se_epi = True - # Delay testing appropriateness of the concatenation of these images - # (i.e. differences in contrast) to later - else: - app.error('No phase-encoding contrast present in SE-EPI images, even after concatenating with b=0 images due to -align_seepi option; ' - 'cannot perform inhomogeneity field estimation') - else: - app.error('No phase-encoding contrast present in SE-EPI images; cannot perform inhomogeneity field estimation') - - if app.args.align_seepi: - - dwi_te = dwi_header.keyval().get('EchoTime') - se_epi_te = se_epi_header.keyval().get('EchoTime') - if dwi_te and se_epi_te and dwi_te != se_epi_te: - app.warn('It appears that the spin-echo EPI images used for inhomogeneity field estimation have a different echo time to the DWIs being corrected. ' - 'This may cause issues in estimation of the field, as the first DWI b=0 volume will be added to the input series to topup ' - 'due to use of the -align_seepi option.') - - dwi_tr = dwi_header.keyval().get('RepetitionTime') - se_epi_tr = se_epi_header.keyval().get('RepetitionTime') - if dwi_tr and se_epi_tr and dwi_tr != se_epi_tr: - app.warn('It appears that the spin-echo EPI images used for inhomogeneity field estimation have a different repetition time to the DWIs being corrected. ' - 'This may cause issues in estimation of the field, as the first DWI b=0 volume will be added to the input series to topup ' - 'due to use of the -align_seepi option.') - - dwi_flip = dwi_header.keyval().get('FlipAngle') - se_epi_flip = se_epi_header.keyval().get('FlipAngle') - if dwi_flip and se_epi_flip and dwi_flip != se_epi_flip: - app.warn('It appears that the spin-echo EPI images used for inhomogeneity field estimation have a different flip angle to the DWIs being corrected. 
' - 'This may cause issues in estimation of the field, as the first DWI b=0 volume will be added to the input series to topup ' - 'due to use of the -align_seepi option.') - - # If we are using the -se_epi option, and hence the input images to topup have not come from the DWIs themselves, - # we need to insert the first b=0 DWI volume to the start of the topup input image. Otherwise, the field estimated - # by topup will not be correctly aligned with the volumes as they are processed by eddy. - # - # However, there's also a code path by which we may have already performed this addition. - # If we have already apliced the b=0 volumes from the DWI input with the SE-EPI image - # (due to the absence of phase-encoding contrast in the SE-EPI series), we don't want to - # re-attempt such a concatenation; the fact that the DWI b=0 images were inserted ahead of - # the SE-EPI images means the alignment issue should be dealt with. - - if dwi_first_bzero_index == len(grad) and not dwi_bzero_added_to_se_epi: - - app.warn('Unable to find b=0 volume in input DWIs to provide alignment between topup and eddy; script will proceed as though the -align_seepi option were not provided') - - # If b=0 volumes from the DWIs have already been added to the SE-EPI image due to an - # absence of phase-encoding contrast in the latter, we don't need to perform the following - elif not dwi_bzero_added_to_se_epi: - - run.command('mrconvert dwi.mif dwi_first_bzero.mif -coord 3 ' + str(dwi_first_bzero_index) + ' -axes 0,1,2') - dwi_first_bzero_pe = dwi_manual_pe_scheme[dwi_first_bzero_index] if overwrite_dwi_pe_scheme else dwi_pe_scheme[dwi_first_bzero_index] - - se_epi_pe_sum = [ 0, 0, 0 ] - se_epi_volume_to_remove = len(se_epi_pe_scheme) - for index, line in enumerate(se_epi_pe_scheme): - se_epi_pe_sum = [ i + j for i, j in zip(se_epi_pe_sum, line[0:3]) ] - if se_epi_volume_to_remove == len(se_epi_pe_scheme) and line[0:3] == dwi_first_bzero_pe[0:3]: - se_epi_volume_to_remove = index - new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_firstdwibzero.mif' - if (se_epi_pe_sum == [ 0, 0, 0 ]) and (se_epi_volume_to_remove < len(se_epi_pe_scheme)): - app.console('Balanced phase-encoding scheme detected in SE-EPI series; volume ' + str(se_epi_volume_to_remove) + ' will be removed and replaced with first b=0 from DWIs') - run.command('mrconvert ' + se_epi_path + ' - -coord 3 ' + ','.join([str(index) for index in range(len(se_epi_pe_scheme)) if not index == se_epi_volume_to_remove]) + ' | mrcat dwi_first_bzero.mif - ' + new_se_epi_path + ' -axis 3') - # Also need to update the phase-encoding scheme appropriately if it's being set manually - # (if embedded within the image headers, should be updated through the command calls) - if se_epi_manual_pe_scheme: - first_line = list(manual_pe_dir) - first_line.append(trt) - new_se_epi_manual_pe_scheme = [ ] - new_se_epi_manual_pe_scheme.append(first_line) - for index, entry in enumerate(se_epi_manual_pe_scheme): - if not index == se_epi_volume_to_remove: - new_se_epi_manual_pe_scheme.append(entry) - se_epi_manual_pe_scheme = new_se_epi_manual_pe_scheme - else: - if se_epi_pe_sum == [ 0, 0, 0 ] and se_epi_volume_to_remove == len(se_epi_pe_scheme): - app.console('Phase-encoding scheme of -se_epi image is balanced, but could not find appropriate volume with which to substitute first b=0 volume from DWIs; first b=0 DWI volume will be inserted to start of series, resulting in an unbalanced scheme') - else: - app.console('Unbalanced phase-encoding scheme detected in series provided via 
-se_epi option; first DWI b=0 volume will be inserted to start of series') - run.command('mrcat dwi_first_bzero.mif ' + se_epi_path + ' ' + new_se_epi_path + ' -axis 3') - # Also need to update the phase-encoding scheme appropriately - if se_epi_manual_pe_scheme: - first_line = list(manual_pe_dir) - first_line.append(trt) - se_epi_manual_pe_scheme = [ first_line, se_epi_manual_pe_scheme ] - - # Ended branching based on balanced-ness of PE acquisition scheme within SE-EPI volumes - file.delTemporary(se_epi_path) - file.delTemporary('dwi_first_bzero.mif') - se_epi_path = new_se_epi_path - - # Ended branching based on: - # - Detection of first b=0 volume in DWIs; or - # - Prior merge of SE-EPI and DWI b=0 volumes due to no phase-encoding contrast in SE-EPI - - # Completed checking for presence of -se_epi option - -elif not PE_design == 'None': # No SE EPI images explicitly provided: In some cases, can extract appropriate b=0 images from DWI - - # If using 'All' or 'Header', and haven't been given any topup images, need to extract the b=0 volumes from the series, - # preserving phase-encoding information while doing so - # Preferably also make sure that there's some phase-encoding contrast in there... - # With -rpe_all, need to write inferred phase-encoding to file and import before using dwiextract so that the phase-encoding - # of the extracted b=0's is propagated to the generated b=0 series - run.command('mrconvert dwi.mif' + import_dwi_pe_table_option + ' - | dwiextract - ' + se_epi_path + ' -bzero') - se_epi_header = image.Header(se_epi_path) - - # If there's no contrast remaining in the phase-encoding scheme, it'll be written to - # PhaseEncodingDirection and TotalReadoutTime rather than pe_scheme - # In this scenario, we will be unable to run topup, or volume recombination - if 'pe_scheme' not in se_epi_header.keyval(): - if PE_design == 'All': - app.error('DWI header indicates no phase encoding contrast between b=0 images; cannot proceed with volume recombination-based pre-processing') - else: - app.warn('DWI header indicates no phase encoding contrast between b=0 images; proceeding without inhomogeneity field estimation') - do_topup = False - run.function(os.remove, se_epi_path) - se_epi_path = None - se_epi_header = None - - -# If the first b=0 volume in the DWIs is in fact not the first volume (i.e. index zero), we're going to -# manually place it at the start of the DWI volumes when they are input to eddy, so that the -# first input volume to topup and the first input volume to eddy are one and the same. -# Note: If at a later date, the statistical outputs from eddy are considered (e.g. 
motion, outliers), -# then this volume permutation will need to be taken into account -if dwi_first_bzero_index: - app.console('First b=0 volume in input DWIs is volume index ' + str(dwi_first_bzero_index) + '; ' - 'this will be permuted to be the first volume (index 0) when eddy is run') - dwi_permute_volumes_pre_eddy_option = ' -coord 3 ' + \ - str(dwi_first_bzero_index) + \ - ',0' + \ - (':' + str(dwi_first_bzero_index-1) if dwi_first_bzero_index > 1 else '') + \ - (',' + str(dwi_first_bzero_index+1) if dwi_first_bzero_index < dwi_num_volumes-1 else '') + \ - (':' + str(dwi_num_volumes-1) if dwi_first_bzero_index < dwi_num_volumes-2 else '') - dwi_permute_volumes_post_eddy_option = ' -coord 3 1' + \ - (':' + str(dwi_first_bzero_index) if dwi_first_bzero_index > 1 else '') + \ - ',0' + \ - (',' + str(dwi_first_bzero_index+1) if dwi_first_bzero_index < dwi_num_volumes-1 else '') + \ - (':' + str(dwi_num_volumes-1) if dwi_first_bzero_index < dwi_num_volumes-2 else '') - app.var(dwi_permute_volumes_pre_eddy_option, dwi_permute_volumes_post_eddy_option) - - - -# This may be required when setting up the topup call -import_se_epi_manual_pe_table_option = '' -if se_epi_manual_pe_scheme: - with open('se_epi_manual_pe_scheme.txt', 'w') as f: - for line in se_epi_manual_pe_scheme: - f.write(' '.join([str(value) for value in line]) + '\n') - import_se_epi_manual_pe_table_option = ' -import_pe_table se_epi_manual_pe_scheme.txt' - - -# Need gradient table if running dwi2mask after applytopup to derive a brain mask for eddy -run.command('mrinfo dwi.mif -export_grad_mrtrix grad.b') - - -eddy_in_topup_option = '' -dwi_post_eddy_crop_option = '' -dwi_path = 'dwi.mif' -if do_topup: - - # topup will crash if its input image has a spatial dimension with a non-even size; - # presumably due to a downsampling by a factor of 2 in a multi-resolution scheme - # The newest eddy also requires the output from topup and the input DWIs to have the same size; - # therefore this restriction applies to the DWIs as well - # Rather than crop in this case (which would result in a cropped output image), - # duplicate the last slice on any problematic axis, and then crop that extra - # slice at the output step - # By this point, if the input SE-EPI images and DWIs are not on the same image grid, the - # SE-EPI images have already been re-gridded to DWI image space; - odd_axis_count = 0 - for axis_size in dwi_header.size()[:3]: - if int(axis_size%2): - odd_axis_count += 1 - if odd_axis_count: - app.console(str(odd_axis_count) + ' spatial ' + ('axes of DWIs have' if odd_axis_count > 1 else 'axis of DWIs has') + ' non-even size; ' - 'this will be automatically padded for compatibility with topup, and the extra slice' + ('s' if odd_axis_count > 1 else '') + ' erased afterwards') - for axis, axis_size in enumerate(dwi_header.size()[:3]): - if int(axis_size%2): - new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_pad' + str(axis) + '.mif' - run.command('mrconvert ' + se_epi_path + ' -coord ' + str(axis) + ' ' + str(axis_size-1) + ' - | mrcat ' + se_epi_path + ' - ' + new_se_epi_path + ' -axis ' + str(axis)) - file.delTemporary(se_epi_path) - se_epi_path = new_se_epi_path - new_dwi_path = os.path.splitext(dwi_path)[0] + '_pad' + str(axis) + '.mif' - run.command('mrconvert ' + dwi_path + ' -coord ' + str(axis) + ' ' + str(axis_size-1) + ' - | mrcat ' + dwi_path + ' - ' + new_dwi_path + ' -axis ' + str(axis)) - file.delTemporary(dwi_path) - dwi_path = new_dwi_path - dwi_post_eddy_crop_option += ' -coord ' + str(axis) + ' 0:' + 
str(axis_size-1) - # If we are padding the slice axis, and performing slice-to-volume correction, - # then we need to perform the corresponding padding to the slice timing - if eddy_mporder and slice_encoding_direction[axis]: - dwi_num_slices += 1 - # At this point in the script, this information may be encoded either within - # the slice timing vector (as imported from the image header), or as - # slice groups (i.e. in the format expected by eddy). How these data are - # stored affects how the padding is performed. - if slice_timing: - slice_timing.append(slice_timing[-1]) - elif slice_groups: - # Can't edit in place when looping through the list - new_slice_groups = [ ] - for group in slice_groups: - if axis_size-1 in group: - group.append(axis_size) - new_slice_groups.append(group) - slice_groups = new_slice_groups - - - # Do the conversion in preparation for topup - run.command('mrconvert ' + se_epi_path + ' topup_in.nii' + import_se_epi_manual_pe_table_option + ' -strides -1,+2,+3,+4 -export_pe_table topup_datain.txt') - file.delTemporary(se_epi_path) - - # Run topup - topup_manual_options = '' - if app.args.topup_options: - topup_manual_options = ' ' + app.args.topup_options.strip() - (topup_stdout, topup_stderr) = run.command(topup_cmd + ' --imain=topup_in.nii --datain=topup_datain.txt --out=field --fout=field_map' + fsl_suffix + ' --config=' + topup_config_path + topup_manual_options) - with open('topup_output.txt', 'w') as f: - f.write(topup_stdout + '\n' + topup_stderr) - if app.verbosity > 2: - app.console('Output of topup command:\n' + topup_stdout + '\n' + topup_stderr) - - # Apply the warp field to the input image series to get an initial corrected volume estimate - # applytopup can't receive the complete DWI input and correct it as a whole, because the phase-encoding - # details may vary between volumes - if dwi_manual_pe_scheme: - run.command('mrconvert ' + dwi_path + import_dwi_pe_table_option + ' - | mrinfo - -export_pe_eddy applytopup_config.txt applytopup_indices.txt') - else: - run.command('mrinfo ' + dwi_path + ' -export_pe_eddy applytopup_config.txt applytopup_indices.txt') - - - # Update: Call applytopup separately for each unique phase-encoding - # This should be the most compatible option with more complex phase-encoding acquisition designs, - # since we don't need to worry about applytopup performing volume recombination - # Plus, recombination doesn't need to be optimal; we're only using this to derive a brain mask - applytopup_image_list = [ ] - index = 1 - with open('applytopup_config.txt', 'r') as f: - for line in f: - prefix = os.path.splitext(dwi_path)[0] + '_pe_' + str(index) - input_path = prefix + '.nii' - json_path = prefix + '.json' - temp_path = prefix + '_applytopup.nii' - output_path = prefix + '_applytopup.mif' - run.command('dwiextract ' + dwi_path + import_dwi_pe_table_option + ' -pe ' + ','.join(line.split()) + ' - | mrconvert - ' + input_path + ' -json_export ' + json_path) - run.command(applytopup_cmd + ' --imain=' + input_path + ' --datain=applytopup_config.txt --inindex=' + str(index) + ' --topup=field --out=' + temp_path + ' --method=jac') - file.delTemporary(input_path) - temp_path = fsl.findImage(temp_path) - run.command('mrconvert ' + temp_path + ' ' + output_path + ' -json_import ' + json_path) - file.delTemporary(json_path) - file.delTemporary(temp_path) - applytopup_image_list.append(output_path) - index += 1 - - # Use the initial corrected volumes to derive a brain mask for eddy - if len(applytopup_image_list) == 1: - 
run.command('dwi2mask ' + applytopup_image_list[0] + ' - | maskfilter - dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') - else: - run.command('mrcat ' + ' '.join(applytopup_image_list) + ' - -axis 3 | dwi2mask - - | maskfilter - dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') - - for entry in applytopup_image_list: - file.delTemporary(entry) - - eddy_in_topup_option = ' --topup=field' - -else: - - # Generate a processing mask for eddy based on the uncorrected input DWIs - run.command('dwi2mask ' + dwi_path + ' - | maskfilter - dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') - - -# Generate the text file containing slice timing / grouping information if necessary -if eddy_mporder: - if slice_timing: - # This list contains, for each slice, the timing offset between acquisition of the - # first slice in the volume, and acquisition of that slice - # Eddy however requires a text file where each row contains those slices that were - # acquired with a single readout, in ordered rows from first slice (group) - # acquired to last slice (group) acquired - if sum(slice_encoding_direction) < 0: - slice_timing = reversed(slice_timing) - slice_groups = [ [ x[0] for x in g ] for _, g in itertools.groupby(sorted(enumerate(slice_timing), key=lambda x:x[1]), key=lambda x:x[1]) ] #pylint: disable=unused-variable - app.var(slice_timing, slice_groups) - # Variable slice_groups may have already been defined in the correct format. - # In that instance, there's nothing to do other than write it to file; - # UNLESS the slice encoding direction is known to be reversed, in which case - # we need to reverse the timings. Would think that this would however be - # rare, given it requires that the slspec text file be provided manually but - # SliceEncodingDirection to be present. 
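# ------------------------------------------------------------------------------
# Illustrative sketch (not part of this patch): how a per-slice timing vector from
# the image header is turned into the "slspec" layout expected by eddy, i.e. one row
# per excitation listing the slices acquired simultaneously, ordered from earliest
# to latest. This mirrors the itertools.groupby expression above; the timings below
# are invented for demonstration (4 slices, multiband factor 2, interleaved).
# ------------------------------------------------------------------------------
import itertools

slice_timing_example = [0.0, 0.5, 0.0, 0.5]   # acquisition offset of each slice (s)
slice_groups_example = [ [ x[0] for x in group ]
                         for _, group in itertools.groupby(
                             sorted(enumerate(slice_timing_example), key=lambda x: x[1]),
                             key=lambda x: x[1]) ]
print(slice_groups_example)                   # -> [[0, 2], [1, 3]]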
- elif slice_groups and sum(slice_encoding_direction) < 0: - new_slice_groups = [ ] - for group in new_slice_groups: - new_slice_groups.append([ dwi_num_slices-index for index in group ]) - app.var(slice_groups, new_slice_groups) - slice_groups = new_slice_groups - - with open('slspec.txt', 'w') as f: - for line in slice_groups: - f.write(' '.join(str(value) for value in line) + '\n') - eddy_manual_options.append('--slspec=slspec.txt') - - -# Prepare input data for eddy -run.command('mrconvert ' + dwi_path + import_dwi_pe_table_option + dwi_permute_volumes_pre_eddy_option + ' eddy_in.nii -strides -1,+2,+3,+4 -export_grad_fsl bvecs bvals -export_pe_eddy eddy_config.txt eddy_indices.txt') -file.delTemporary(dwi_path) - - -# May need to add command-line options depending on version of eddy -# in order to get desired image output for -eddyqc_all option -def eddy_options_qc(eddy_cmd, existing_options, eddyqc_all): - if not eddyqc_all: - return [] - eddy_help_process = subprocess.Popen([ eddy_cmd, '--help' ], stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False) - eddy_help_stderr = eddy_help_process.communicate()[1].decode('utf-8', errors='replace').splitlines() - result = [] - for eddy_help_line in eddy_help_stderr: - eddy_help_line = eddy_help_line.lstrip() - for opt in [ '--cnr_maps', '--residuals' ]: - if eddy_help_line.startswith(opt) and opt not in existing_options: - result.append(opt) - return result - - -# Run eddy -# If a CUDA version is in PATH, run that first; if it fails, re-try using the non-CUDA version -eddy_all_options = '--imain=eddy_in.nii --mask=eddy_mask.nii --acqp=eddy_config.txt --index=eddy_indices.txt --bvecs=bvecs --bvals=bvals' + eddy_in_topup_option + ' --out=dwi_post_eddy' -eddy_cuda_cmd = fsl.eddyBinary(True) -eddy_openmp_cmd = fsl.eddyBinary(False) -if eddy_cuda_cmd: - additional_eddy_options = eddy_options_qc(eddy_cuda_cmd, eddy_manual_options, app.args.eddyqc_all) - # If running CUDA version, but OpenMP version is also available, don't stop the script if the CUDA version fails - (eddy_stdout, eddy_stderr) = run.command(eddy_cuda_cmd + ' ' - + eddy_all_options - + (' ' + ' '.join(eddy_manual_options) if eddy_manual_options else '') - + (' ' + ' '.join(additional_eddy_options) if additional_eddy_options else ''), - not eddy_openmp_cmd) - if app.verbosity > 2: - app.console('Output of CUDA eddy command:\n' + eddy_stdout + '\n' + eddy_stderr) - if os.path.isfile('dwi_post_eddy.eddy_parameters'): - # Flag that the OpenMP version won't be attempted - eddy_openmp_cmd = '' - else: - app.warn('CUDA version of eddy appears to have failed; trying OpenMP version') -if eddy_openmp_cmd: - additional_eddy_options = eddy_options_qc(eddy_openmp_cmd, eddy_manual_options, app.args.eddyqc_all) - (eddy_stdout, eddy_stderr) = run.command(eddy_openmp_cmd + ' ' - + eddy_all_options - + (' ' + ' '.join(eddy_manual_options) if eddy_manual_options else '') - + (' ' + ' '.join(additional_eddy_options) if additional_eddy_options else '')) - if app.verbosity > 2: - app.console('Output of OpenMP eddy command:\n' + eddy_stdout + '\n' + eddy_stderr) -file.delTemporary('eddy_in.nii') -file.delTemporary('eddy_mask.nii') -if do_topup: - file.delTemporary(fsl.findImage('field_fieldcoef')) -with open('eddy_output.txt', 'w') as f: - f.write(eddy_stdout + '\n' + eddy_stderr) -eddy_output_image_path = fsl.findImage('dwi_post_eddy') - - -# Get the axis strides from the input series, so the output image can be modified to match -stride_option = ' -strides ' + ','.join([str(i) for i in 
dwi_header.strides()]) - - -# Check to see whether or not eddy has provided a rotated bvecs file; -# if it has, import this into the output image -bvecs_path = 'dwi_post_eddy.eddy_rotated_bvecs' -if not os.path.isfile(bvecs_path): - app.warn('eddy has not provided rotated bvecs file; using original gradient table. Recommend updating FSL eddy to version 5.0.9 or later.') - bvecs_path = 'bvecs' - - -# Determine whether or not volume recombination should be performed -# This could be either due to use of -rpe_all option, or just due to the data provided with -rpe_header -# Rather than trying to re-use the code that was used in the case of -rpe_all, run fresh code -# The phase-encoding scheme needs to be checked also -volume_matchings = [ dwi_num_volumes ] * dwi_num_volumes -volume_pairs = [ ] -app.debug('Commencing gradient direction matching; ' + str(dwi_num_volumes) + ' volumes') -for index1 in range(dwi_num_volumes): - if volume_matchings[index1] == dwi_num_volumes: # As yet unpaired - for index2 in range(index1+1, dwi_num_volumes): - if volume_matchings[index2] == dwi_num_volumes: # Also as yet unpaired - # Here, need to check both gradient matching and reversed phase-encode direction - if not any(dwi_pe_scheme[index1][i] + dwi_pe_scheme[index2][i] for i in range(0,3)) and grads_match(grad[index1], grad[index2]): - volume_matchings[index1] = index2 - volume_matchings[index2] = index1 - volume_pairs.append([index1, index2]) - app.debug('Matched volume ' + str(index1) + ' with ' + str(index2) + '\n' + - 'Phase encoding: ' + str(dwi_pe_scheme[index1]) + ' ' + str(dwi_pe_scheme[index2]) + '\n' + - 'Gradients: ' + str(grad[index1]) + ' ' + str(grad[index2])) - break - - -if len(volume_pairs) != int(dwi_num_volumes/2): - - if do_topup: - file.delTemporary('topup_in.nii') - file.delTemporary(fsl.findImage('field_map')) - - # Convert the resulting volume to the output image, and re-insert the diffusion encoding - run.command('mrconvert ' + eddy_output_image_path + ' result.mif' + dwi_permute_volumes_post_eddy_option + dwi_post_eddy_crop_option + stride_option + ' -fslgrad ' + bvecs_path + ' bvals') - file.delTemporary(eddy_output_image_path) - -else: - app.console('Detected matching DWI volumes with opposing phase encoding; performing explicit volume recombination') - - # Perform a manual combination of the volumes output by eddy, since LSR is disabled - - # Generate appropriate bvecs / bvals files - # Particularly if eddy has provided rotated bvecs, since we're combining two volumes into one that - # potentially have subject rotation between them (and therefore the sensitisation direction is - # not precisely equivalent), the best we can do is take the mean of the two vectors. 
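# ------------------------------------------------------------------------------
# Illustrative sketch (not part of this patch): averaging the eddy-rotated gradient
# directions of a reversed phase-encode volume pair, as described in the comment
# above and implemented just below. Since +g and -g encode the same diffusion
# sensitisation, one vector may need its sign flipped before the mean is taken; a
# small squared norm of the naive sum indicates near-anti-parallel vectors. The 0.5
# threshold and the example vectors are illustrative choices only.
# ------------------------------------------------------------------------------
import math

def average_paired_bvecs(vec_a, vec_b):
  summed = [ a + b for a, b in zip(vec_a, vec_b) ]
  if sum(v * v for v in summed) < 0.5:        # nearly anti-parallel: flip one sign
    summed = [ a - b for a, b in zip(vec_a, vec_b) ]
  norm = math.sqrt(sum(v * v for v in summed))
  return [ v / norm for v in summed ] if norm else [ 0.0, 0.0, 0.0 ]

# Two slightly rotated, opposite-sign measurements of the same direction:
print(average_paired_bvecs([0.999, 0.045, 0.0], [-1.0, 0.0, 0.0]))  # ~ halfway between the two inputs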
- # Manual recombination of volumes needs to take into account the explicit volume matching - - bvecs = [ [] for axis in range(3) ] - with open(bvecs_path, 'r') as f: - for axis, line in enumerate(f): - bvecs[axis] = line.split() - - bvecs_combined_transpose = [ ] - bvals_combined = [ ] - - for pair in volume_pairs: - bvec_sum = [ float(bvecs[0][pair[0]]) + float(bvecs[0][pair[1]]), - float(bvecs[1][pair[0]]) + float(bvecs[1][pair[1]]), - float(bvecs[2][pair[0]]) + float(bvecs[2][pair[1]]) ] - norm2 = bvec_sum[0]*bvec_sum[0] + bvec_sum[1]*bvec_sum[1] + bvec_sum[2]*bvec_sum[2] - # If one diffusion sensitisation gradient direction is reversed with respect to - # the other, still want to enable their recombination; but need to explicitly - # account for this when averaging the two directions - if norm2 < 0.0: - bvec_sum = [ float(bvecs[0][pair[0]]) - float(bvecs[0][pair[1]]), - float(bvecs[1][pair[0]]) - float(bvecs[1][pair[1]]), - float(bvecs[2][pair[0]]) - float(bvecs[2][pair[1]]) ] - norm2 = bvec_sum[0]*bvec_sum[0] + bvec_sum[1]*bvec_sum[1] + bvec_sum[2]*bvec_sum[2] - # Occasionally a bzero volume can have a zero vector - if norm2: - factor = 1.0 / math.sqrt(norm2) - new_vec = [ bvec_sum[0]*factor, bvec_sum[1]*factor, bvec_sum[2]*factor ] - else: - new_vec = [ 0.0, 0.0, 0.0 ] - bvecs_combined_transpose.append(new_vec) - bvals_combined.append(0.5 * (grad[pair[0]][3] + grad[pair[1]][3])) - - with open('bvecs_combined', 'w') as f: - for axis in range(0, 3): - axis_data = [ ] - for volume in range(0, int(dwi_num_volumes/2)): - axis_data.append(str(bvecs_combined_transpose[volume][axis])) - f.write(' '.join(axis_data) + '\n') - - with open('bvals_combined', 'w') as f: - f.write(' '.join( [ str(b) for b in bvals_combined ] )) - - - # Prior to 5.0.8, a bug resulted in the output field map image from topup having an identity transform, - # regardless of the transform of the input image - # Detect this, and manually replace the transform if necessary - # (even if this doesn't cause an issue with the subsequent mrcalc command, it may in the future, it's better for - # visualising the script temporary files, and it gives the user a warning about an out-of-date FSL) - field_map_image = fsl.findImage('field_map') - field_map_header = image.Header(field_map_image) - if not image.match('topup_in.nii', field_map_header, 3): - app.warn('topup output field image has erroneous header; recommend updating FSL to version 5.0.8 or later') - new_field_map_image = 'field_map_fix.mif' - run.command('mrtransform ' + field_map_image + ' -replace topup_in.nii ' + new_field_map_image) - file.delTemporary(field_map_image) - field_map_image = new_field_map_image - # In FSL 6.0.0, field map image is erroneously constructed with the same number of volumes as the input image, - # with all but the first volume containing intensity-scaled duplicates of the uncorrected input images - # The first volume is however the expected field offset image - elif len(field_map_header.size()) == 4: - app.console('Correcting erroneous FSL 6.0.0 field map image output') - new_field_map_image = 'field_map_fix.mif' - run.command('mrconvert ' + field_map_image + ' -coord 3 0 -axes 0,1,2 ' + new_field_map_image) - file.delTemporary(field_map_image) - field_map_image = new_field_map_image - file.delTemporary('topup_in.nii') - - - # Derive the weight images - # Scaling term for field map is identical to the bandwidth provided in the topup config file - # (converts Hz to pixel count; that way a simple image gradient can be used to get the 
Jacobians) - # Let mrfilter apply the default 1 voxel size gaussian smoothing filter before calculating the field gradient - # - # The jacobian image may be different for any particular volume pair - # The appropriate PE directions and total readout times can be acquired from the eddy-style config/index files - # eddy_config.txt and eddy_indices.txt - - eddy_config = [ [ float(f) for f in line.split() ] for line in open('eddy_config.txt', 'r').read().split('\n')[:-1] ] - eddy_indices = [ int(i) for i in open('eddy_indices.txt', 'r').read().split() ] - app.var(eddy_config, eddy_indices) - - # This section derives, for each phase encoding configuration present, the 'weight' to be applied - # to the image during volume recombination, which is based on the Jacobian of the field in the - # phase encoding direction - for index, config in enumerate(eddy_config): - pe_axis = [ i for i, e in enumerate(config[0:3]) if e != 0][0] - sign_multiplier = ' -1.0 -mult' if config[pe_axis] < 0 else '' - field_derivative_path = 'field_deriv_pe_' + str(index+1) + '.mif' - run.command('mrcalc ' + field_map_image + ' ' + str(config[3]) + ' -mult' + sign_multiplier + ' - | mrfilter - gradient - | mrconvert - ' + field_derivative_path + ' -coord 3 ' + str(pe_axis) + ' -axes 0,1,2') - jacobian_path = 'jacobian_' + str(index+1) + '.mif' - run.command('mrcalc 1.0 ' + field_derivative_path + ' -add 0.0 -max ' + jacobian_path) - file.delTemporary(field_derivative_path) - run.command('mrcalc ' + jacobian_path + ' ' + jacobian_path + ' -mult weight' + str(index+1) + '.mif') - file.delTemporary(jacobian_path) - file.delTemporary(field_map_image) - - # If eddy provides its main image output in a compressed format, the code block below will need to - # uncompress that image independently for every volume pair. Instead, if this is the case, let's - # convert it to an uncompressed format before we do anything with it. 
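# ------------------------------------------------------------------------------
# Illustrative sketch (not part of this patch): the Jacobian-based weights computed
# by the mrcalc calls above, and the Skare & Bammer (2010) recombination applied
# below, restated with NumPy for readability. The field map (in Hz) multiplied by
# the total readout time gives the displacement in voxels along the phase-encode
# axis; the Jacobian of that distortion (floored at zero) is squared to form the
# weight, and the pair is combined as (I1*w1 + I2*w2) / (w1 + w2). The function
# signature, the default pe_axis, and the omission of the pre-gradient smoothing
# step are simplifications for this sketch.
# ------------------------------------------------------------------------------
import numpy as np

def recombine_pair(vol_a, vol_b, field_hz, trt_a, trt_b, pe_axis=1):
  # vol_a, vol_b, field_hz: 3D NumPy arrays defined on the same voxel grid
  disp_a = field_hz * trt_a                   # displacement field (voxels), e.g. A>>P
  disp_b = field_hz * -trt_b                  # opposite polarity, e.g. P>>A
  jac_a = np.clip(1.0 + np.gradient(disp_a, axis=pe_axis), 0.0, None)
  jac_b = np.clip(1.0 + np.gradient(disp_b, axis=pe_axis), 0.0, None)
  w_a, w_b = jac_a ** 2, jac_b ** 2           # weights = squared Jacobians
  return np.clip((vol_a * w_a + vol_b * w_b) / (w_a + w_b), 0.0, None)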
- if eddy_output_image_path.endswith('.gz'): - new_eddy_output_image_path = 'dwi_post_eddy_uncompressed.mif' - run.command('mrconvert ' + eddy_output_image_path + ' ' + new_eddy_output_image_path) - file.delTemporary(eddy_output_image_path) - eddy_output_image_path = new_eddy_output_image_path - - # If the DWI volumes were permuted prior to running eddy, then the simplest approach is to permute them - # back to their original positions; otherwise, the stored gradient vector directions / phase encode - # directions / matched volume pairs are no longer appropriate - if dwi_permute_volumes_post_eddy_option: - new_eddy_output_image_path = os.path.splitext(eddy_output_image_path)[0] + '_volpermuteundo.mif' - run.command('mrconvert ' + eddy_output_image_path + dwi_permute_volumes_post_eddy_option + ' ' + new_eddy_output_image_path) - file.delTemporary(eddy_output_image_path) - eddy_output_image_path = new_eddy_output_image_path - - # This section extracts the two volumes corresponding to each reversed phase-encoded volume pair, and - # derives a single image volume based on the recombination equation - combined_image_list = [ ] - progress = app.progressBar('Performing explicit volume recombination', len(volume_pairs)) - for index, volumes in enumerate(volume_pairs): - pe_indices = [ eddy_indices[i] for i in volumes ] - run.command('mrconvert ' + eddy_output_image_path + ' volume0.mif -coord 3 ' + str(volumes[0])) - run.command('mrconvert ' + eddy_output_image_path + ' volume1.mif -coord 3 ' + str(volumes[1])) - # Volume recombination equation described in Skare and Bammer 2010 - combined_image_path = 'combined' + str(index) + '.mif' - run.command('mrcalc volume0.mif weight' + str(pe_indices[0]) + '.mif -mult volume1.mif weight' + str(pe_indices[1]) + '.mif -mult -add weight' + str(pe_indices[0]) + '.mif weight' + str(pe_indices[1]) + '.mif -add -divide 0.0 -max ' + combined_image_path) - combined_image_list.append(combined_image_path) - run.function(os.remove, 'volume0.mif') - run.function(os.remove, 'volume1.mif') - progress.increment() - progress.done() - - file.delTemporary(eddy_output_image_path) - for index in range(0, len(eddy_config)): - file.delTemporary('weight' + str(index+1) + '.mif') - - # Finally the recombined volumes must be concatenated to produce the resulting image series - run.command('mrcat ' + ' '.join(combined_image_list) + ' - -axis 3 | mrconvert - result.mif' + dwi_post_eddy_crop_option + ' -fslgrad bvecs_combined bvals_combined' + stride_option) - for entry in combined_image_list: - file.delTemporary(entry) - - -# Grab any relevant files that eddy has created, and copy them to the requested directory -if eddyqc_path: - if os.path.exists(eddyqc_path) and not os.path.isdir(eddyqc_path): - run.function(os.remove, eddyqc_path) - if not os.path.exists(eddyqc_path): - run.function(os.makedirs, eddyqc_path) - for filename in eddyqc_files: - if os.path.exists('dwi_post_eddy.' + filename): - run.function(os.rename if app.cleanup else shutil.copy, - 'dwi_post_eddy.' + filename, - os.path.join(eddyqc_path, filename)) - if app.args.eddyqc_all: - for basename in eddyqc_images: - # Can't use fsl.findImage(): in this version, will call app.error() if not found - for extension in [ '.nii', '.nii.gz' ]: - full_path = 'dwi_post_eddy.' 
+ basename + extension - if os.path.exists(full_path): - run.function(os.rename if app.cleanup else shutil.copy, - full_path, - basename + extension) - - -# Build a list of header key-value entries that we want to _remove_ from the -# output image, as they may have been useful for controlling pre-processing -# but are no longer required, and will just bloat the key-value listings of -# all subsequent derived images -# Disabled this for now: The output from eddy is a NIfTI, so all these fields -# have been lost. For now just neglect to re-introduce them; in the future, -# this may be combined with GitHub Issue #1188 (proper behaviour of -# command_history header key-value entry when running a Python script) -#keys_to_remove = [ 'EchoTime', 'FlipAngle', 'MultibandAccelerationFactor', 'PhaseEncodingDirection', 'RepetitionTime', 'SliceEncodingDirection', 'SliceTiming', 'TotalReadoutTime', 'pe_scheme' ] -#clear_property_options = ' ' + ' '.join(['-clear_property '+key for key in keys_to_remove if key in dwi_header.keyval() ]) - - -# Finish! -run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + grad_export_option + (' -force' if app.forceOverwrite else '')) -app.complete() diff --git a/bin/dwishellmath b/bin/dwishellmath index 45cb6f216b..362efd0e18 100755 --- a/bin/dwishellmath +++ b/bin/dwishellmath @@ -1,32 +1,65 @@ -#!/bin/bash +#!/usr/bin/env python -if [ $# -eq 0 ]; then - cat < - For example: - $ dwishellmath dwi.mif mean dc.mif - will output the mean diffusion-weighted volume per shell. +SUPPORTED_OPS = ['mean', 'median', 'sum', 'product', 'rms', 'norm', 'var', 'std', 'min', 'max', 'absmax', 'magmax'] -AVAILABLE OPERATIONS: - mean, median, sum, product, rms, norm, var, std, min, max, absmax, magmax. -HELP_PAGE +def usage(cmdline): #pylint: disable=unused-variable + from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel + cmdline.set_author('Daan Christiaens (daan.christiaens@kcl.ac.uk)') + cmdline.set_synopsis('Apply an mrmath operation to each b-value shell in a DWI series') + cmdline.add_description('The output of this command is a 4D image, where ' + 'each volume corresponds to a b-value shell (in order of increasing b-value), and ' + 'the intensities within each volume correspond to the chosen statistic having been computed from across the DWI volumes belonging to that b-value shell.') + cmdline.add_argument('input', help='The input diffusion MRI series') + cmdline.add_argument('operation', choices=SUPPORTED_OPS, help='The operation to be applied to each shell; this must be one of the following: ' + ', '.join(SUPPORTED_OPS)) + cmdline.add_argument('output', help='The output image series') + cmdline.add_example_usage('To compute the mean diffusion-weighted signal in each b-value shell', + 'dwishellmath dwi.mif mean shellmeans.mif') + app.add_dwgrad_import_options(cmdline) - exit 1 -fi -tmpfiles="" -for b in $(mrinfo "$1" -shell_bvalues); -do - echo "Extracting $2 of the b=$b shell." 
- tmpfiles="$tmpfiles "$(dwiextract -shells $b "$1" - -quiet | mrmath -axis 3 - $2 - -quiet) -done -mrcat -axis 3 $tmpfiles "$3" -quiet +def execute(): #pylint: disable=unused-variable + from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel + from mrtrix3 import app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel + # check inputs and outputs + dwi_header = image.Header(path.from_user(app.ARGS.input, False)) + if len(dwi_header.size()) != 4: + raise MRtrixError('Input image must be a 4D image') + gradimport = app.read_dwgrad_import_options() + if not gradimport and 'dw_scheme' not in dwi_header.keyval(): + raise MRtrixError('No diffusion gradient table provided, and none present in image header') + app.check_output_path(app.ARGS.output) + # import data and gradient table + app.make_scratch_dir() + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('in.mif') + gradimport + ' -strides 0,0,0,1') + app.goto_scratch_dir() + # run per-shell operations + files = [] + for index, bvalue in enumerate(image.mrinfo('in.mif', 'shell_bvalues').split()): + filename = 'shell-{:02d}.mif'.format(index) + run.command('dwiextract -shells ' + bvalue + ' in.mif - | mrmath -axis 3 - ' + app.ARGS.operation + ' ' + filename) + files.append(filename) + # concatenate to output file + run.command('mrcat -axis 3 ' + ' '.join(files) + ' out.mif') + run.command('mrconvert out.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) +# Execute the script +import mrtrix3 +mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/for_each b/bin/for_each new file mode 100755 index 0000000000..7e7dc76908 --- /dev/null +++ b/bin/for_each @@ -0,0 +1,301 @@ +#!/usr/bin/env python + +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + + +import os, re, sys, threading + + + +# Since we're going to capture everything after the colon character and "hide" it from argparse, +# we need to store the contents from there in a global so as for it to be accessible from execute() +CMDSPLIT = [ ] + + + +def usage(cmdline): #pylint: disable=unused-variable + global CMDSPLIT + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and David Raffelt (david.raffelt@florey.edu.au)') + cmdline.set_synopsis('Perform some arbitrary processing step for each of a set of inputs') + cmdline.add_description('This script greatly simplifies various forms of batch processing by enabling the execution of a command (or set of commands) independently for each of a set of inputs. Part of the way that this is achieved is by providing basic text substitutions, which simplify the formation of valid command strings based on the unique components of the input strings on which the script is instructed to execute. 
The available substitutions are listed below (note that the -test command-line option can be used to ensure correct command string formation prior to actually executing the commands):') + cmdline.add_description(' - IN: The full matching pattern, including leading folders. For example, if the target list contains a file "folder/image.mif", any occurrence of "IN" will be substituted with "folder/image.mif".') + cmdline.add_description(' - NAME: The basename of the matching pattern. For example, if the target list contains a file "folder/image.mif", any occurrence of "NAME" will be substituted with "image.mif".') + cmdline.add_description(' - PRE: The prefix of the input pattern (the basename stripped of its extension). For example, if the target list contains a file "folder/my.image.mif.gz", any occurrence of "PRE" will be substituted with "my.image".') + cmdline.add_description(' - UNI: The unique part of the input after removing any common prefix and common suffix. For example, if the target list contains files: "folder/001dwi.mif", "folder/002dwi.mif", "folder/003dwi.mif", any occurrence of "UNI" will be substituted with "001", "002", "003".') + cmdline.add_description('Note that due to a limitation of the Python "argparse" module, any command-line OPTIONS that the user intends to provide specifically to the for_each script must appear BEFORE providing the list of inputs on which for_each is intended to operate. While command-line options provided as such will be interpreted specifically by the for_each script, any command-line options that are provided AFTER the COLON separator will form part of the executed COMMAND, and will therefore be interpreted as command-line options having been provided to that underlying command.') + cmdline.add_example_usage('Demonstration of basic usage syntax', + 'for_each folder/*.mif : mrinfo IN', + 'This will run the "mrinfo" command for every .mif file present in "folder/". Note that the compulsory colon symbol is used to separate the list of items on which for_each is being instructed to operate, from the command that is intended to be run for each input.') + cmdline.add_example_usage('Multi-threaded use of for_each', + 'for_each -nthreads 4 freesurfer/subjects/* : recon-all -subjid NAME -all', + 'In this example, for_each is instructed to run the FreeSurfer command \'recon-all\' for all subjects within the \'subjects\' directory, with four subjects being processed in parallel at any one time. Whenever processing of one subject is completed, processing for a new unprocessed subject will commence. This technique is useful for improving the efficiency of running single-threaded commands on multi-core systems, as long as the system possesses enough memory to support such parallel processing. Note that in the case of multi-threaded commands (which includes many MRtrix3 commands), it is generally preferable to permit multi-threaded execution of the command on a single input at a time, rather than processing multiple inputs in parallel.') + cmdline.add_example_usage('Excluding specific inputs from execution', + 'for_each *.nii -exclude 001.nii : mrconvert IN PRE.mif', + 'Particularly when a wildcard is used to define the list of inputs for for_each, it is possible in some instances that this list will include one or more strings for which execution should in fact not be performed; for instance, if a command has already been executed for one or more files, and then for_each is being used to execute the same command for all other files. 
In this case, the -exclude option can be used to effectively remove an item from the list of inputs that would otherwise be included due to the use of a wildcard (and can be used more than once to exclude more than one string). In this particular example, mrconvert is instructed to perform conversions from NIfTI to MRtrix image formats, for all except the first image in the directory. Note that any usages of this option must appear AFTER the list of inputs. Note also that the argument following the -exclude option can alternatively be a regular expression, in which case any inputs for which a match to the expression is found will be excluded from processing.') + cmdline.add_example_usage('Testing the command string substitution', + 'for_each -test * : mrconvert IN PRE.mif', + 'By specifying the -test option, the script will print to the terminal the results of text substitutions for all of the specified inputs, but will not actually execute those commands. It can therefore be used to verify that the script is receiving the intended set of inputs, and that the text substitutions on those inputs lead to the intended command strings.') + cmdline.add_argument('inputs', help='Each of the inputs for which processing should be run', nargs='+') + cmdline.add_argument('colon', help='Colon symbol (":") delimiting the for_each inputs & command-line options from the actual command to be executed', type=str, choices=[':']) + cmdline.add_argument('command', help='The command string to run for each input, containing any number of substitutions listed in the Description section', type=str) + cmdline.add_argument('-exclude', help='Exclude one specific input string / all strings matching a regular expression from being processed (see Example Usage)', action='append', metavar='"regex"', nargs=1) + cmdline.add_argument('-test', help='Test the operation of the for_each script, by printing the command strings following string substitution but not actually executing them', action='store_true', default=False) + + # Usage of for_each needs to be handled slightly differently here: + # We want argparse to parse only the contents of the command-line before the colon symbol, + # as these are the items that pertain to the invocation of the for_each script; + # anything after the colon should instead form a part of the command that + # for_each is responsible for executing + try: + index = next(i for i,s in enumerate(sys.argv) if s == ':') + try: + CMDSPLIT = sys.argv[index+1:] + sys.argv = sys.argv[:index+1] + sys.argv.append(' '.join(CMDSPLIT)) + except IndexError: + sys.stderr.write('Erroneous usage: No command specified (colon separator cannot be the last entry provided)\n') + sys.exit(0) + except StopIteration: + if len(sys.argv) > 2: + sys.stderr.write('Erroneous usage: A colon must be used to separate for_each inputs from the command to be executed\n') + sys.exit(0) + + + + + + + + +# These need to be globals in order to be accessible from execute_parallel() +class Shared(object): + def __init__(self): + self._job_index = 0 + self.lock = threading.Lock() + self.stop = False + def next(self, jobs): + job = None + with self.lock: + if self._job_index < len(jobs): + job = jobs[self._job_index] + self._job_index += 1 + self.stop = self._job_index == len(jobs) + return job + +shared = Shared() #pylint: disable=invalid-name + + + +KEYLIST = [ 'IN', 'NAME', 'PRE', 'UNI' ] + + + +def execute(): #pylint: disable=unused-variable + from mrtrix3 import ANSI, MRtrixError #pylint: disable=no-name-in-module, 
import-outside-toplevel + from mrtrix3 import app, run #pylint: disable=no-name-in-module, import-outside-toplevel + global CMDSPLIT + + inputs = app.ARGS.inputs + app.debug('All inputs: ' + str(inputs)) + app.debug('Command: ' + str(app.ARGS.command)) + app.debug('CMDSPLIT: ' + str(CMDSPLIT)) + + if app.ARGS.exclude: + app.ARGS.exclude = [ exclude[0] for exclude in app.ARGS.exclude ] # To deal with argparse's action=append. Always guaranteed to be only one argument since nargs=1 + app.debug('To exclude: ' + str(app.ARGS.exclude)) + exclude_unmatched = [ ] + to_exclude = [ ] + for exclude in app.ARGS.exclude: + if exclude in inputs: + to_exclude.append(exclude) + else: + try: + re_object = re.compile(exclude) + regex_hits = [ ] + for arg in inputs: + if re_object.search(arg).group(): + regex_hits.append(arg) + if regex_hits: + to_exclude.extend(regex_hits) + else: + exclude_unmatched.append(exclude) + except Exception: + exclude_unmatched.append(exclude) + if exclude_unmatched: + app.warn('Item' + ('s' if len(exclude_unmatched) > 1 else '') + ' specified via -exclude did not appear in input list: ' + str('\'' + exclude_unmatched[0] + '\'' if len(exclude_unmatched) == 1 else exclude_unmatched)) + inputs = [ arg for arg in inputs if arg not in to_exclude ] + if not inputs: + raise MRtrixError('No inputs remaining after application of exclusion criteri' + ('on' if len(app.ARGS.exclude) == 1 else 'a')) + app.debug('Inputs after exclusion: ' + str(inputs)) + + common_prefix = os.path.commonprefix(inputs) + common_suffix = os.path.commonprefix([i[::-1] for i in inputs])[::-1] + app.debug('Common prefix: ' + common_prefix if common_prefix else 'No common prefix') + app.debug('Common suffix: ' + common_suffix if common_suffix else 'No common suffix') + + for entry in CMDSPLIT: + if os.path.exists(entry): + keys_present = [ key for key in KEYLIST if key in entry ] + if keys_present: + app.warn('Performing text substitution of ' + str(keys_present) + ' within command: "' + entry + '"; but the original text exists as a path on the file system... 
is this a problematic filesystem path?') + + try: + next(entry for entry in CMDSPLIT if any(key for key in KEYLIST if key in entry)) + except StopIteration: + raise MRtrixError('None of the unique for_each keys ' + str(KEYLIST) + ' appear in command string "' + app.ARGS.command + '"; no substitution can occur') + + class Entry(object): + def __init__(self, input_text): + self.input_text = input_text + self.sub_in = input_text + self.sub_name = os.path.basename(input_text.rstrip('/')) + self.sub_pre = os.path.splitext(self.sub_name.rstrip('.gz'))[0] + if common_suffix: + self.sub_uni = input_text[len(common_prefix):-len(common_suffix)] + else: + self.sub_uni = input_text[len(common_prefix):] + + self.substitutions = { 'IN': self.sub_in, 'NAME': self.sub_name, 'PRE': self.sub_pre, 'UNI': self.sub_uni } + app.debug('Input text: ' + input_text) + app.debug('Substitutions: ' + str(self.substitutions)) + + self.cmd = [ ] + for entry in CMDSPLIT: + for (key, value) in self.substitutions.items(): + entry = entry.replace(key, value) + if ' ' in entry: + entry = '"' + entry + '"' + self.cmd.append(entry) + app.debug('Resulting command: ' + str(self.cmd)) + + self.outputtext = None + self.returncode = None + + jobs = [ ] + for i in inputs: + jobs.append(Entry(i)) + + if app.ARGS.test: + app.console('Command strings for ' + str(len(jobs)) + ' jobs:') + for job in jobs: + sys.stderr.write(ANSI.execute + 'Input:' + ANSI.clear + ' "' + job.input_text + '"\n') + sys.stderr.write(ANSI.execute + 'Command:' + ANSI.clear + ' ' + ' '.join(job.cmd) + '\n') + return + + parallel = app.NUM_THREADS is not None and app.NUM_THREADS > 1 + + def progress_string(): + text = str(sum([ 1 if job.returncode is not None else 0 for job in jobs ])) + \ + '/' + \ + str(len(jobs)) + \ + ' jobs completed ' + \ + ('across ' + str(app.NUM_THREADS) + ' threads' if parallel else 'sequentially') + fail_count = sum([ 1 if job.returncode else 0 for job in jobs ]) + if fail_count: + text += ' (' + str(fail_count) + ' errors)' + return text + + progress = app.ProgressBar(progress_string(), len(jobs)) + + def execute_parallel(): + global shared #pylint: disable=invalid-name + while not shared.stop: + my_job = shared.next(jobs) + if not my_job: + return + try: + result = run.command(' '.join(my_job.cmd), shell=True) + my_job.outputtext = result.stdout + result.stderr + my_job.returncode = 0 + except run.MRtrixCmdError as exception: + my_job.outputtext = str(exception) + my_job.returncode = exception.returncode + except Exception as exception: # pylint: disable=broad-except + my_job.outputtext = str(exception) + my_job.returncode = 1 + with shared.lock: + progress.increment(progress_string()) + + if parallel: + threads = [ ] + for i in range (1, app.NUM_THREADS): + thread = threading.Thread(target=execute_parallel) + thread.start() + threads.append(thread) + execute_parallel() + for thread in threads: + thread.join() + else: + for job in jobs: + try: + result = run.command(' '.join(job.cmd), shell=True) + job.outputtext = result.stdout + result.stderr + job.returncode = 0 + except run.MRtrixCmdError as exception: + job.outputtext = str(exception) + job.returncode = exception.returncode + except Exception as exception: # pylint: disable=broad-except + job.outputtext = str(exception) + job.returncode = 1 + progress.increment(progress_string()) + + progress.done() + + assert all([ job.returncode is not None for job in jobs ]) + fail_count = sum([ 1 if job.returncode else 0 for job in jobs ]) + if fail_count: + app.warn(str(fail_count) + ' of ' 
+ str(len(jobs)) + ' jobs did not complete successfully') + if fail_count > 1: + app.warn('Outputs from failed commands:') + sys.stderr.write(app.EXEC_NAME + ':\n') + else: + app.warn('Output from failed command:') + for job in jobs: + if job.returncode: + if job.outputtext: + app.warn('For input "' + job.sub_in + '" (returncode = ' + str(job.returncode) + '):') + for line in job.outputtext.splitlines(): + sys.stderr.write(' ' * (len(app.EXEC_NAME)+2) + line + '\n') + else: + app.warn('No output from command for input "' + job.sub_in + '" (return code = ' + str(job.returncode) + ')') + if fail_count > 1: + sys.stderr.write(app.EXEC_NAME + ':\n') + raise MRtrixError(str(fail_count) + ' of ' + str(len(jobs)) + ' jobs did not complete successfully') + + if app.VERBOSITY > 1: + if any(job.outputtext for job in jobs): + sys.stderr.write(app.EXEC_NAME + ':\n') + for job in jobs: + if job.outputtext: + app.console('Output of command for input "' + job.sub_in + '":') + for line in job.outputtext.splitlines(): + sys.stderr.write(' ' * (len(app.EXEC_NAME)+2) + line + '\n') + else: + app.console('No output from command for input "' + job.sub_in + '"') + sys.stderr.write(app.EXEC_NAME + ':\n') + else: + app.console('No output from command for any inputs') + + app.console('Script reported successful completion for all inputs') + + + + + + +# Execute the script +import mrtrix3 +mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/foreach b/bin/foreach deleted file mode 100755 index 02de27c249..0000000000 --- a/bin/foreach +++ /dev/null @@ -1,130 +0,0 @@ -#!/bin/bash - -if [ $# -eq 0 ]; then - cat < : command ... - - For example: - $ foreach folder/*.mif : mrinfo IN - will run mrinfo for each matching file in "folder" - -AVAILABLE SUBSTITUTIONS: - IN: the full matching pattern, including leading folders - For example, if the target list contains a file "folder/image.mif", - any occurrence of "IN" will be substituted with "folder/image.mif". - NAME: the basename of the matching pattern - For example, if the target list contains a file "folder/image.mif", - any occurrence of "NAME" will be substituted with "image.mif". - PRE: the prefix of the matching pattern - For example, if the target list contains a file "folder/image.mif", - any occurrence of "PRE" will be substituted with "image". - UNI: the unique prefix of the matching pattern after removing the common suffix - For example, if the target list contains files: - "folder/001dwi.mif", "folder/002dwi.mif", "folder/003dwi.mif" - any occurrence of "UNI" will be substituted with "001", "002", "003". - -PARALLEL PROCESSING: - To run multiple jobs at once, add the -N option before the colon, - where N is the number of concurrent jobs required. - - For example: - $ foreach -32 input/* : mrconvert IN output/NAME - will run up to 32 of the required jobs in parallel - -SEQUENTIAL PROCESSING: - To run a series of sequential jobs for each input file, you can - use these standard shell constructs: - | ; && || - Note that these will need to quoted in inverted commas to prevent - the shell from interpreting them. - - For example: - - $ foreach -8 images/*.nii : \\ - echo processing NAME... ";" \\ - mrcalc IN mask/PRE.mif -mult "images masked"/PRE.mif -quiet "&&" \\ - mrstats IN -mask images\\ masked/PRE.mif \\| tail -n 1 - - will run the three commands specified for each input file one - after the other, and run 8 of these processing pipelines in parallel. 
- It will only invoke the mrstats command if the previous mrmath - command completes without errors, since the "&&" construct was used - (see below). Note that the backslash character ("\\") can be used to - break a command over multiple lines; this can help to make your - scripts more readable. - - For reference: - | pipe the output of current command into the input of the next - ; means "run next command regardless of what happens with the - current command" - && means "run next command only if current command succeeds - without error (i.e. returns with a zero exit code) - || means "run next command only if current command fails - -HANDLING SPACES IN FILE NAMES: - The example above also illustrates how spaces in filenames (and other - non-standard characters) can be handled (the "images masked" folder): - either using inverted commas, or by prefixing the relevant characters - with a backslash ("\\"). - -HELP_PAGE - - exit 1 -fi - - -# extract list of targets and number of jobs if specified: -args=() -njobs="" -verbose="" -while (( "$#" )); do - if [ "x$1" == "x:" ]; then break; fi - if [ "x$1" == "x-v" ]; then - verbose="true" - elif [ "x${1:0:1}" == "x-" ]; then - njobs="-P ${1:1} " - else - args+=("$1") - fi - shift -done - - -# extract command to run: -shift -while (( "$#" )); do - cmd+="\"$1\" " - shift -done - -if [ x"$cmd" == "x" ]; then - echo "error: no command specified!" - exit 1 -fi - -common_suffix=`printf '%s\n' "${args[@]}" | rev | sed -e '1{h;d;}' -e 'G;s,\(.*\).*\n\1.*,\1,;h;$!d' | rev` - -( for arg in "${args[@]}"; do - name=$(basename "$arg") - pre="${name%.*}" - uni=$(basename "$arg" "$common_suffix") - - actual_cmd=${cmd//IN/$arg} - actual_cmd=${actual_cmd//NAME/$name} - actual_cmd=${actual_cmd//PRE/$pre} - actual_cmd=${actual_cmd//UNI/$uni} - actual_cmd=${actual_cmd//\"|\"/|} - actual_cmd=${actual_cmd//\";\"/;} - actual_cmd=${actual_cmd//\"&&\"/&&} - actual_cmd=${actual_cmd//\"||\"/||} - - if [ "x$verbose" == "xtrue" ]; then - printf "executing: %s\n" "$actual_cmd" 1>&2 - fi - - echo $actual_cmd - -#done ) | tr '\n' '\0' | xargs -0 printf "\"%s\"\n" -done ) | tr '\n' '\0' | xargs -0 $njobs -n 1 bash -c - diff --git a/bin/gen_scheme b/bin/gen_scheme index 3d7371cd4e..a0e582ba7b 100755 --- a/bin/gen_scheme +++ b/bin/gen_scheme @@ -1,5 +1,20 @@ #!/bin/bash +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + set -e if [ "$#" -eq 0 ]; then @@ -12,7 +27,7 @@ SYNOPSIS numPE the number of phase-encoding directions to be included in the scheme (most scanners will only support a single PE - direction per sequence, so this will typically be 1). + direction per sequence, so this will typically be 1). bvalue the b-value of the shell @@ -24,7 +39,7 @@ DESCRIPTION This script generates a diffusion gradient table according to the parameters specified. 
For most users, something like the following would be appropriate: - + gen_scheme 1 0 5 750 20 3000 60 which will geneate a multi-shell diffusion gradient table with a single @@ -32,13 +47,13 @@ DESCRIPTION volumes. The gradient table is generated using the following procedure: - + - The directions for each shell are optimally distributed using a bipolar - electrostatic repulsion model (using the command 'dirgen'). + electrostatic repulsion model (using the command 'dirgen'). - These are then split into numPE sets (if numPE != 1) using a brute-force random search for the most optimally-distributed subsets (using the command - 'dirsplit'). + 'dirsplit'). - Each of the resulting sets is then rearranged by inversion of individual directions through the origin (i.e. direction vector x => -x) using a @@ -53,13 +68,13 @@ DESCRIPTION b-value and directional domains. In other words, the approach aims to ensure that if the acquisition is cut short, the set of volumes acquired nonetheless contains the same relative proportions of b-values as - specified, with directions that are near-uniformly distributed. + specified, with directions that are near-uniformly distributed. The primary output of this command is a file called 'dw_scheme.txt', consisting of a 5-column table, with one line per volume. Each column consists of [ x y z b PE ], where [ x y z ] is the unit direction vector, b - is the b-value in unit of s/mm², and PE is a integer ID from 1 to numPE. - + is the b-value in unit of s/mm², and PE is a integer ID from 1 to numPE. + The command also retains all of the subsets generated along the way, which you can safely delete once the command has completed. Since this can consist of quite a few files, it is recommended to run this command within @@ -107,16 +122,16 @@ else if [ $nPE -gt 2 ]; then dirsplit dirs-b$1-$2-1.txt dirs-b$1-$2-1{1..2}.txt -force $perm dirsplit dirs-b$1-$2-2.txt dirs-b$1-$2-2{1..2}.txt -force $perm - # TODO: the rest... - for n in dirs-b$1-$2-{1,2}{1,2}.txt; do + # TODO: the rest... + for n in dirs-b$1-$2-{1,2}{1,2}.txt; do dirflip $n ${n%.txt}-flip.txt -force $perm - merge=$merge" "${n%.txt}-flip.txt + merge=$merge" "${n%.txt}-flip.txt done - else + else for n in dirs-b$1-$2-{1,2}.txt; do dirflip $n ${n%.txt}-flip.txt -force $perm merge=$merge" "${n%.txt}-flip.txt - done + done fi else dirflip dirs-b$1-$2.txt dirs-b$1-$2-flip.txt -force $perm diff --git a/bin/labelsgmfix b/bin/labelsgmfix index 92594b8312..cbd11013ec 100755 --- a/bin/labelsgmfix +++ b/bin/labelsgmfix @@ -1,5 +1,20 @@ #!/usr/bin/env python +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + # Script for 'repairing' a FreeSurfer parcellation image # FreeSurfer's sub-cortical structure segmentation has been observed to be highly variable # under scan-rescan conditions. 
This introduces unwanted variability into the connectome, @@ -8,145 +23,153 @@ # node indices that correspond to these structures, and replaces them with estimates # derived from FIRST. -# Make the corresponding MRtrix3 Python libraries available -import inspect, math, os, sys -lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib')) -if not os.path.isdir(lib_folder): - sys.stderr.write('Unable to locate MRtrix3 Python libraries') - sys.exit(1) -sys.path.insert(0, lib_folder) - - - -from mrtrix3 import app, fsl, image, path, run - - - -app.init('Robert E. Smith (robert.smith@florey.edu.au)', - 'In a FreeSurfer parcellation image, replace the sub-cortical grey matter structure delineations using FSL FIRST') -app.cmdline.addCitation('', 'Patenaude, B.; Smith, S. M.; Kennedy, D. N. & Jenkinson, M. A Bayesian model of shape and appearance for subcortical brain segmentation. NeuroImage, 2011, 56, 907-922', True) -app.cmdline.addCitation('', 'Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', True) -app.cmdline.addCitation('', 'Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. The effects of SIFT on the reproducibility and biological accuracy of the structural connectome. NeuroImage, 2015, 104, 253-265', False) -app.cmdline.add_argument('parc', help='The input FreeSurfer parcellation image') -app.cmdline.add_argument('t1', help='The T1 image to be provided to FIRST') -app.cmdline.add_argument('lut', help='The lookup table file that the parcellated image is based on') -app.cmdline.add_argument('output', help='The output parcellation image') -options = app.cmdline.add_argument_group('Options for the labelsgmfix script') -options.add_argument('-premasked', action='store_true', default=False, help='Indicate that brain masking has been applied to the T1 input image') -options.add_argument('-sgm_amyg_hipp', action='store_true', default=False, help='Consider the amygdalae and hippocampi as sub-cortical grey matter structures, and also replace their estimates with those from FIRST') -app.parse() - -app.checkOutputPath(path.fromUser(app.args.output, False)) -image.check3DNonunity(path.fromUser(app.args.t1, False)) - -if app.isWindows(): - app.error('Script cannot run on Windows due to FSL dependency') - -fsl_path = os.environ.get('FSLDIR', '') -if not fsl_path: - app.error('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') - -first_cmd = fsl.exeName('run_first_all') - -first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin') -if not os.path.isdir(first_atlas_path): - app.error('Atlases required for FSL\'s FIRST program not installed;\nPlease install fsl-first-data using your relevant package manager') - -# Want a mapping between FreeSurfer node names and FIRST structure names -# Just deal with the 5 that are used in ACT; FreeSurfer's hippocampus / amygdala segmentations look good enough. 
-structure_map = { 'L_Accu':'Left-Accumbens-area', 'R_Accu':'Right-Accumbens-area', - 'L_Caud':'Left-Caudate', 'R_Caud':'Right-Caudate', - 'L_Pall':'Left-Pallidum', 'R_Pall':'Right-Pallidum', - 'L_Puta':'Left-Putamen', 'R_Puta':'Right-Putamen', - 'L_Thal':'Left-Thalamus-Proper', 'R_Thal':'Right-Thalamus-Proper' } -if app.args.sgm_amyg_hipp: - structure_map.update({ 'L_Amyg':'Left-Amygdala', 'R_Amyg':'Right-Amygdala', - 'L_Hipp':'Left-Hippocampus', 'R_Hipp':'Right-Hippocampus' }) - -t1_spacing = image.Header(path.fromUser(app.args.t1, False)).spacing() -upsample_for_first = False -# If voxel size is 1.25mm or larger, make a guess that the user has erroneously re-gridded their data -if math.pow(t1_spacing[0] * t1_spacing[1] * t1_spacing[2], 1.0/3.0) > 1.225: - app.warn('Voxel size of input T1 image larger than expected for T1-weighted images (' + str(t1_spacing) + '); ' - 'image will be resampled to 1mm isotropic in order to maximise chance of ' - 'FSL FIRST script succeeding') - upsample_for_first = True - -app.makeTempDir() - -# Get the parcellation and T1 images into the temporary directory, with conversion of the T1 into the correct format for FSL -run.command('mrconvert ' + path.fromUser(app.args.parc, True) + ' ' + path.toTemp('parc.mif', True)) -if upsample_for_first: - run.command('mrresize ' + path.fromUser(app.args.t1, True) + ' - -voxel 1.0 -interp sinc | mrcalc - 0.0 -max - | mrconvert - ' + path.toTemp('T1.nii', True) + ' -strides -1,+2,+3') -else: - run.command('mrconvert ' + path.fromUser(app.args.t1, True) + ' ' + path.toTemp('T1.nii', True) + ' -strides -1,+2,+3') - -app.gotoTempDir() - -# Run FIRST -first_input_is_brain_extracted = '' -if app.args.premasked: - first_input_is_brain_extracted = ' -b' -run.command(first_cmd + ' -m none -s ' + ','.join(structure_map.keys()) + ' -i T1.nii' + first_input_is_brain_extracted + ' -o first') -fsl.checkFirst('first', structure_map.keys()) - -# Generate an empty image that will be used to construct the new SGM nodes -run.command('mrcalc parc.mif 0 -min sgm.mif') - -# Read the local connectome LUT file -# This will map a structure name to an index -sgm_lut = {} -sgm_lut_file_name = 'FreeSurferSGM.txt' -sgm_lut_file_path = os.path.join(path.sharedDataPath(), path.scriptSubDirName(), sgm_lut_file_name) -with open(sgm_lut_file_path) as f: - for line in f: - line = line.rstrip() - if line and line[0]!='#': - line = line.split() - sgm_lut[line[1]] = line[0] # This can remain as a string - -# Convert FIRST meshes to node masks -# In this use case, don't want the PVE images; want to threshold at 0.5 -mask_list = [ ] -progress = app.progressBar('Generating mask images for SGM structures', len(structure_map)) -for key, value in structure_map.items(): - image_path = key + '_mask.mif' - mask_list.append(image_path) - vtk_in_path = 'first-' + key + '_first.vtk' - run.command('meshconvert ' + vtk_in_path + ' first-' + key + '_transformed.vtk -transform first2real T1.nii') - run.command('mesh2voxel first-' + key + '_transformed.vtk parc.mif - | mrthreshold - ' + image_path + ' -abs 0.5') - # Add to the SGM image; don't worry about overlap for now - node_index = sgm_lut[value] - run.command('mrcalc ' + image_path + ' ' + node_index + ' sgm.mif -if sgm_new.mif') - if not app.continueOption: - run.function(os.remove, 'sgm.mif') - run.function(os.rename, 'sgm_new.mif', 'sgm.mif') - progress.increment() -progress.done() - -# Detect any overlapping voxels between the SGM masks, and set to zero -run.command('mrmath ' + ' '.join(mask_list) + ' sum - | mrcalc 
- 1 -gt sgm_overlap_mask.mif') -run.command('mrcalc sgm_overlap_mask.mif 0 sgm.mif -if sgm_masked.mif') - -# Convert the SGM label image to the indices that are required based on the user-provided LUT file -run.command('labelconvert sgm_masked.mif ' + sgm_lut_file_path + ' ' + path.fromUser(app.args.lut, True) + ' sgm_new_labels.mif') - -# For each SGM structure: -# * Figure out what index the structure has been mapped to; this can only be done using mrstats -# * Strip that index from the parcellation image -# * Insert the new delineation of that structure -progress = app.progressBar('Replacing SGM parcellations', len(structure_map)) -for struct in structure_map: - image_path = struct + '_mask.mif' - index = image.statistic('sgm_new_labels.mif', 'median', '-mask ' + image_path) - run.command('mrcalc parc.mif ' + index + ' -eq 0 parc.mif -if parc_removed.mif') - run.function(os.remove, 'parc.mif') - run.function(os.rename, 'parc_removed.mif', 'parc.mif') - progress.increment() -progress.done() - -# Insert the new delineations of all SGM structures in a single call -# Enforce unsigned integer datatype of output image -run.command('mrcalc sgm_new_labels.mif 0.5 -gt sgm_new_labels.mif parc.mif -if result.mif -datatype uint32') -run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.forceOverwrite else '')) -app.complete() + + +import math, os + + + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('In a FreeSurfer parcellation image, replace the sub-cortical grey matter structure delineations using FSL FIRST') + cmdline.add_citation('Patenaude, B.; Smith, S. M.; Kennedy, D. N. & Jenkinson, M. A Bayesian model of shape and appearance for subcortical brain segmentation. NeuroImage, 2011, 56, 907-922', is_external=True) + cmdline.add_citation('Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', is_external=True) + cmdline.add_citation('Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. The effects of SIFT on the reproducibility and biological accuracy of the structural connectome. 
NeuroImage, 2015, 104, 253-265') + cmdline.add_argument('parc', help='The input FreeSurfer parcellation image') + cmdline.add_argument('t1', help='The T1 image to be provided to FIRST') + cmdline.add_argument('lut', help='The lookup table file that the parcellated image is based on') + cmdline.add_argument('output', help='The output parcellation image') + cmdline.add_argument('-premasked', action='store_true', default=False, help='Indicate that brain masking has been applied to the T1 input image') + cmdline.add_argument('-sgm_amyg_hipp', action='store_true', default=False, help='Consider the amygdalae and hippocampi as sub-cortical grey matter structures, and also replace their estimates with those from FIRST') + + + + + + +def execute(): #pylint: disable=unused-variable + from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel + from mrtrix3 import app, fsl, image, path, run, utils #pylint: disable=no-name-in-module, import-outside-toplevel + + if utils.is_windows(): + raise MRtrixError('Script cannot run on Windows due to FSL dependency') + + app.check_output_path(path.from_user(app.ARGS.output, False)) + image.check_3d_nonunity(path.from_user(app.ARGS.t1, False)) + + fsl_path = os.environ.get('FSLDIR', '') + if not fsl_path: + raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') + + first_cmd = fsl.exe_name('run_first_all') + + first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin') + if not os.path.isdir(first_atlas_path): + raise MRtrixError('Atlases required for FSL\'s FIRST program not installed;\nPlease install fsl-first-data using your relevant package manager') + + # Want a mapping between FreeSurfer node names and FIRST structure names + # Just deal with the 5 that are used in ACT; FreeSurfer's hippocampus / amygdala segmentations look good enough. 
+ structure_map = { 'L_Accu':'Left-Accumbens-area', 'R_Accu':'Right-Accumbens-area', + 'L_Caud':'Left-Caudate', 'R_Caud':'Right-Caudate', + 'L_Pall':'Left-Pallidum', 'R_Pall':'Right-Pallidum', + 'L_Puta':'Left-Putamen', 'R_Puta':'Right-Putamen', + 'L_Thal':'Left-Thalamus-Proper', 'R_Thal':'Right-Thalamus-Proper' } + if app.ARGS.sgm_amyg_hipp: + structure_map.update({ 'L_Amyg':'Left-Amygdala', 'R_Amyg':'Right-Amygdala', + 'L_Hipp':'Left-Hippocampus', 'R_Hipp':'Right-Hippocampus' }) + + t1_spacing = image.Header(path.from_user(app.ARGS.t1, False)).spacing() + upsample_for_first = False + # If voxel size is 1.25mm or larger, make a guess that the user has erroneously re-gridded their data + if math.pow(t1_spacing[0] * t1_spacing[1] * t1_spacing[2], 1.0/3.0) > 1.225: + app.warn('Voxel size of input T1 image larger than expected for T1-weighted images (' + str(t1_spacing) + '); ' + 'image will be resampled to 1mm isotropic in order to maximise chance of ' + 'FSL FIRST script succeeding') + upsample_for_first = True + + app.make_scratch_dir() + + # Get the parcellation and T1 images into the scratch directory, with conversion of the T1 into the correct format for FSL + run.command('mrconvert ' + path.from_user(app.ARGS.parc) + ' ' + path.to_scratch('parc.mif')) + if upsample_for_first: + run.command('mrgrid ' + path.from_user(app.ARGS.t1) + ' regrid - -voxel 1.0 -interp sinc | mrcalc - 0.0 -max - | mrconvert - ' + path.to_scratch('T1.nii') + ' -strides -1,+2,+3') + else: + run.command('mrconvert ' + path.from_user(app.ARGS.t1) + ' ' + path.to_scratch('T1.nii') + ' -strides -1,+2,+3') + + app.goto_scratch_dir() + + # Run FIRST + first_input_is_brain_extracted = '' + if app.ARGS.premasked: + first_input_is_brain_extracted = ' -b' + run.command(first_cmd + ' -m none -s ' + ','.join(structure_map.keys()) + ' -i T1.nii' + first_input_is_brain_extracted + ' -o first') + fsl.check_first('first', structure_map.keys()) + + # Generate an empty image that will be used to construct the new SGM nodes + run.command('mrcalc parc.mif 0 -min sgm.mif') + + # Read the local connectome LUT file + # This will map a structure name to an index + sgm_lut = {} + sgm_lut_file_name = 'FreeSurferSGM.txt' + sgm_lut_file_path = os.path.join(path.shared_data_path(), path.script_subdir_name(), sgm_lut_file_name) + with open(sgm_lut_file_path) as sgm_lut_file: + for line in sgm_lut_file: + line = line.rstrip() + if line and line[0]!='#': + line = line.split() + sgm_lut[line[1]] = line[0] # This can remain as a string + + # Convert FIRST meshes to node masks + # In this use case, don't want the PVE images; want to threshold at 0.5 + mask_list = [ ] + progress = app.ProgressBar('Generating mask images for SGM structures', len(structure_map)) + for key, value in structure_map.items(): + image_path = key + '_mask.mif' + mask_list.append(image_path) + vtk_in_path = 'first-' + key + '_first.vtk' + run.command('meshconvert ' + vtk_in_path + ' first-' + key + '_transformed.vtk -transform first2real T1.nii') + run.command('mesh2voxel first-' + key + '_transformed.vtk parc.mif - | mrthreshold - ' + image_path + ' -abs 0.5') + # Add to the SGM image; don't worry about overlap for now + node_index = sgm_lut[value] + run.command('mrcalc ' + image_path + ' ' + node_index + ' sgm.mif -if sgm_new.mif') + if not app.CONTINUE_OPTION: + run.function(os.remove, 'sgm.mif') + run.function(os.rename, 'sgm_new.mif', 'sgm.mif') + progress.increment() + progress.done() + + # Detect any overlapping voxels between the SGM masks, and set to zero + 
run.command(['mrmath', mask_list, 'sum', '-', '|', \ + 'mrcalc', '-', '1', '-gt', 'sgm_overlap_mask.mif']) + run.command('mrcalc sgm_overlap_mask.mif 0 sgm.mif -if sgm_masked.mif') + + # Convert the SGM label image to the indices that are required based on the user-provided LUT file + run.command('labelconvert sgm_masked.mif ' + sgm_lut_file_path + ' ' + path.from_user(app.ARGS.lut) + ' sgm_new_labels.mif') + + # For each SGM structure: + # * Figure out what index the structure has been mapped to; this can only be done using mrstats + # * Strip that index from the parcellation image + # * Insert the new delineation of that structure + progress = app.ProgressBar('Replacing SGM parcellations', len(structure_map)) + for struct in structure_map: + image_path = struct + '_mask.mif' + index = int(image.statistics('sgm_new_labels.mif', mask=image_path).median) + run.command('mrcalc parc.mif ' + str(index) + ' -eq 0 parc.mif -if parc_removed.mif') + run.function(os.remove, 'parc.mif') + run.function(os.rename, 'parc_removed.mif', 'parc.mif') + progress.increment() + progress.done() + + # Insert the new delineations of all SGM structures in a single call + # Enforce unsigned integer datatype of output image + run.command('mrcalc sgm_new_labels.mif 0.5 -gt sgm_new_labels.mif parc.mif -if result.mif -datatype uint32') + run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.parc, False), force=app.FORCE_OVERWRITE) + + + + + +# Execute the script +import mrtrix3 +mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/mrtrix3.py b/bin/mrtrix3.py new file mode 100644 index 0000000000..ea8967d532 --- /dev/null +++ b/bin/mrtrix3.py @@ -0,0 +1,67 @@ + +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import imp, os, sys +from distutils.spawn import find_executable + +def imported(lib_path): + success = False + fp = None + try: + fp, pathname, description = imp.find_module('mrtrix3', [ lib_path ]) + imp.load_module('mrtrix3', fp, pathname, description) + success = True + except ImportError: + pass + finally: + if fp: + fp.close() + return success + +# Can the MRtrix3 Python modules be found based on their relative location to this file? +# Note that this includes the case where this file is a softlink within an external module, +# which provides a direct link to the core installation +if not imported (os.path.normpath (os.path.join ( \ + os.path.dirname (os.path.realpath (__file__)), os.pardir, 'lib') )): + + # If this file is a duplicate, which has been stored in an external module, + # we may be able to figure out the location of the core library using the + # build script. 
+ + # case 1: build is a symbolic link: + if not imported (os.path.join (os.path.dirname (os.path.realpath ( \ + os.path.join (os.path.dirname(__file__), os.pardir, 'build'))), 'lib')): + + # case 2: build is a file containing the path to the core build script: + try: + with open (os.path.join (os.path.dirname(__file__), os.pardir, 'build')) as fp: + for line in fp: + build_path = line.split ('#',1)[0].strip() + if build_path: + break + except IOError: + pass + + if not imported (os.path.join (os.path.dirname (build_path), 'lib')): + + sys.stderr.write(''' +ERROR: Unable to locate MRtrix3 Python modules + +For detailed instructions, please refer to: +https://mrtrix.readthedocs.io/en/latest/tips_and_tricks/external_modules.html +''') + sys.stderr.flush() + sys.exit(1) diff --git a/bin/mrtrix_cleanup b/bin/mrtrix_cleanup new file mode 100755 index 0000000000..70b97d2a15 --- /dev/null +++ b/bin/mrtrix_cleanup @@ -0,0 +1,140 @@ +#!/usr/bin/env python + +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + + +import math, os, re, shutil + + +POSTFIXES = [ 'B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB' ] + + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Clean up residual temporary files & scratch directories from MRtrix3 commands') + cmdline.add_description('This script will search the file system at the specified location (and in sub-directories thereof) for any temporary files or directories that have been left behind by failed or terminated MRtrix3 commands, and attempt to delete them.') + cmdline.add_description('Note that the script\'s search for temporary items will not extend beyond the user-specified filesystem location. This means that any built-in or user-specified default location for MRtrix3 piped data and scripts will not be automatically searched. Cleanup of such locations should instead be performed explicitly: e.g. 
"mrtrix_cleanup /tmp/" to remove residual piped images from /tmp/.') + cmdline.add_description('This script should not be run while other MRtrix3 commands are being executed: it may delete temporary items during operation that may lead to unexpected behaviour.') + cmdline.add_argument('path', help='Path from which to commence filesystem search') + cmdline.add_argument('-test', action='store_true', help='Run script in test mode: will list identified files / directories, but not attempt to delete them') + cmdline.add_argument('-failed', metavar='file', nargs=1, help='Write list of items that the script failed to delete to a text file') + cmdline.flag_mutually_exclusive_options([ 'test', 'failed' ]) + + + +def execute(): #pylint: disable=unused-variable + from mrtrix3 import CONFIG #pylint: disable=no-name-in-module, import-outside-toplevel + from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel + + file_regex = re.compile(r"^mrtrix-tmp-[a-zA-Z0-9]{6}\..*$") + file_config_regex = re.compile(r"^" + CONFIG['TmpFilePrefix'] + r"[a-zA-Z0-9]{6}\..*$") \ + if 'TmpFilePrefix' in CONFIG and CONFIG['TmpFilePrefix'] != 'mrtrix-tmp-' \ + else None + dir_regex = re.compile(r"^\w+-tmp-[a-zA-Z0-9]{6}$") + dir_config_regex = re.compile(r"^" + CONFIG['ScriptScratchPrefix'] + r"[a-zA-Z0-9]{6}$") \ + if 'ScriptScratchPrefix' in CONFIG \ + else None + + files_to_delete = [ ] + dirs_to_delete = [ ] + root_dir = os.path.abspath(app.ARGS.path) + print_search_dir = ('' if os.path.abspath(os.getcwd()) == root_dir else ' from ' + root_dir) + def file_search(regex): + files_to_delete.extend([ os.path.join(dirname, filename) for filename in filter(regex.search, filelist) ]) + def dir_search(regex): + items = set(filter(regex.search, subdirlist)) + if items: + dirs_to_delete.extend([os.path.join(dirname, subdirname) for subdirname in items]) + subdirlist[:] = list(set(subdirlist)-items) + def print_msg(): + return 'Searching' + print_search_dir + ' (found ' + str(len(files_to_delete)) + ' files, ' + str(len(dirs_to_delete)) + ' directories)' + progress = app.ProgressBar(print_msg) + for dirname, subdirlist, filelist in os.walk(root_dir): + file_search(file_regex) + if file_config_regex: + file_search(file_config_regex) + dir_search(dir_regex) + if dir_config_regex: + dir_search(dir_config_regex) + progress.increment() + progress.done() + + if app.ARGS.test: + if files_to_delete: + app.console('Files identified (' + str(len(files_to_delete)) + '):') + for filepath in files_to_delete: + app.console(' ' + filepath) + else: + app.console('No files' + ('' if dirs_to_delete else ' or directories') + ' found') + if dirs_to_delete: + app.console('Directories identified (' + str(len(dirs_to_delete)) + '):') + for dirpath in dirs_to_delete: + app.console(' ' + dirpath) + elif files_to_delete: + app.console('No directories identified') + elif files_to_delete or dirs_to_delete: + progress = app.ProgressBar('Deleting temporaries (' + str(len(files_to_delete)) + ' files, ' + str(len(dirs_to_delete)) + ' directories)', len(files_to_delete) + len(dirs_to_delete)) + except_list = [ ] + size_deleted = 0 + for filepath in files_to_delete: + filesize = 0 + try: + filesize = os.path.getsize(filepath) + os.remove(filepath) + size_deleted += filesize + except OSError: + except_list.append(filepath) + progress.increment() + for dirpath in dirs_to_delete: + dirsize = 0 + try: + for dirname, subdirlist, filelist in os.walk(dirpath): + dirsize += sum(os.path.getsize(filename) for filename in filelist) + except 
OSError: + pass + try: + shutil.rmtree(dirpath) + size_deleted += dirsize + except OSError: + except_list.append(dirpath) + progress.increment() + progress.done() + postfix_index = int(math.floor(math.log(size_deleted, 1024))) if size_deleted else 0 + if postfix_index: + size_deleted = round(size_deleted / math.pow(1024, postfix_index), 2) + def print_freed(): + return ' (' + str(size_deleted) + POSTFIXES[postfix_index] + ' freed)' if size_deleted else '' + if except_list: + app.console(str(len(files_to_delete) + len(dirs_to_delete) - len(except_list)) + ' of ' + str(len(files_to_delete) + len(dirs_to_delete)) + ' items erased' + print_freed()) + if app.ARGS.failed: + with open(app.ARGS.failed, 'w') as outfile: + for item in except_list: + outfile.write(item + '\n') + app.console('List of items script failed to erase written to file "' + app.ARGS.failed + '"') + else: + app.console('Items that could not be erased:') + for item in except_list: + app.console(' ' + item) + else: + app.console('All items deleted successfully' + print_freed()) + else: + app.console('No files or directories found') + + + +# Execute the script +import mrtrix3 +mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/notfound b/bin/notfound index 331eeec32a..915398480b 100755 --- a/bin/notfound +++ b/bin/notfound @@ -1,13 +1,28 @@ #!/bin/bash +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + if [ $# -eq 0 ]; then cat << 'HELP_PAGE' -USAGE: +USAGE: $ notfound base_directory search_string - This is a simple script designed to help identify subjects that do not yet have a specific file generated. For example when adding new patients to a study. It is designed to be used when each patient has a folder containing their images. + This is a simple script designed to help identify subjects that do not yet have a specific file generated. For example when adding new patients to a study. It is designed to be used when each patient has a folder containing their images. - For example: + For example: $ notfound study_folder fod.mif will identify all subject folders (e.g. study_folder/subject001, study_folder/subject002, ...) that do NOT contain a file fod.mif @@ -16,7 +31,7 @@ USAGE: HELP_PAGE exit 1 - + fi find ${1} -mindepth 1 -maxdepth 1 \( -type l -o -type d \) '!' -exec test -e "{}/${2}" ';' -print diff --git a/bin/population_template b/bin/population_template index b36d547bfe..a0aeb2ee3c 100755 --- a/bin/population_template +++ b/bin/population_template @@ -1,763 +1,1436 @@ #!/usr/bin/env python -# Generates an unbiased group-average template via image registration of images to a midway space. 
- -# Make the corresponding MRtrix3 Python libraries available -import inspect, os, sys -lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib')) -if not os.path.isdir(lib_folder): - sys.stderr.write('Unable to locate MRtrix3 Python libraries') - sys.exit(1) -sys.path.insert(0, lib_folder) +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. +# Generates an unbiased group-average template via image registration of images to a midway space. -import shutil -move = shutil.move -copy = shutil.copy -copytree = shutil.copytree -rmtree = shutil.rmtree -remove = os.remove +import collections, json, math, os, re, shutil, sys + +DEFAULT_RIGID_SCALES = [0.3,0.4,0.6,0.8,1.0,1.0] +DEFAULT_RIGID_LMAX = [2,2,2,4,4,4] +DEFAULT_AFFINE_SCALES = [0.3,0.4,0.6,0.8,1.0,1.0] +DEFAULT_AFFINE_LMAX = [2,2,2,4,4,4] + +DEFAULT_NL_SCALES = [0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0] +DEFAULT_NL_NITER = [ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5] +DEFAULT_NL_LMAX = [ 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4] + +REGISTRATION_MODES = ['rigid', 'affine', 'nonlinear', 'rigid_affine', 'rigid_nonlinear', 'affine_nonlinear', 'rigid_affine_nonlinear'] + +AGGREGATION_MODES = ["mean", "median"] + +IMAGEEXT = 'mif nii mih mgh mgz img hdr'.split() + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('David Raffelt (david.raffelt@florey.edu.au) & Max Pietsch (maximilian.pietsch@kcl.ac.uk) & Thijs Dhollander (thijs.dhollander@gmail.com)') + + cmdline.set_synopsis('Generates an unbiased group-average template from a series of images') + cmdline.add_description('First a template is optimised with linear registration (rigid and/or affine, both by default), then non-linear registration is used to optimise the template further.') + cmdline.add_argument("input_dir", nargs='+', help='Input directory containing all images used to build the template') + cmdline.add_argument("template", help='Corresponding output template image. For multi-contrast registration, provide multiple paired input_dir and template arguments. Example: WM_dir WM_template.mif GM_dir GM_template.mif') + + options = cmdline.add_argument_group('Multi-contrast options') + options.add_argument('-mc_weight_initial_alignment', help='Weight contribution of each contrast to the initial alignment. Comma separated, default: 1.0') + options.add_argument('-mc_weight_rigid', help='Weight contribution of each contrast to the objective of rigid registration. Comma separated, default: 1.0') + options.add_argument('-mc_weight_affine', help='Weight contribution of each contrast to the objective of affine registration. Comma separated, default: 1.0') + options.add_argument('-mc_weight_nl', help='Weight contribution of each contrast to the objective of nonlinear registration. 
Comma separated, default: 1.0') + + linoptions = cmdline.add_argument_group('Options for the linear registration') + linoptions.add_argument('-linear_no_pause', action='store_true', help='Do not pause the script if a linear registration seems implausible') + linoptions.add_argument('-linear_estimator', help='Specify estimator for intensity difference metric. Valid choices are: l1 (least absolute: |x|), l2 (ordinary least squares), lp (least powers: |x|^1.2), Default: None (no robust estimator used)') + linoptions.add_argument('-rigid_scale', help='Specify the multi-resolution pyramid used to build the rigid template, in the form of a list of scale factors (default: %s). This and affine_scale implicitly define the number of template levels' % ','.join([str(x) for x in DEFAULT_RIGID_SCALES])) + linoptions.add_argument('-rigid_lmax', help='Specify the lmax used for rigid registration for each scale factor, in the form of a list of integers (default: %s). The list must be the same length as the linear_scale factor list' % ','.join([str(x) for x in DEFAULT_RIGID_LMAX])) + linoptions.add_argument('-rigid_niter', help='Specify the number of registration iterations used within each level before updating the template, in the form of a list of integers (default:50 for each scale). This must be a single number or a list of same length as the linear_scale factor list') + linoptions.add_argument('-affine_scale', help='Specify the multi-resolution pyramid used to build the affine template, in the form of a list of scale factors (default: %s). This and rigid_scale implicitly define the number of template levels' % ','.join([str(x) for x in DEFAULT_AFFINE_SCALES])) + linoptions.add_argument('-affine_lmax', help='Specify the lmax used for affine registration for each scale factor, in the form of a list of integers (default: %s). The list must be the same length as the linear_scale factor list' % ','.join([str(x) for x in DEFAULT_AFFINE_LMAX])) + linoptions.add_argument('-affine_niter', help='Specify the number of registration iterations used within each level before updating the template, in the form of a list of integers (default:500 for each scale). This must be a single number or a list of same length as the linear_scale factor list') + + nloptions = cmdline.add_argument_group('Options for the non-linear registration') + nloptions.add_argument('-nl_scale', help='Specify the multi-resolution pyramid used to build the non-linear template, in the form of a list of scale factors (default: %s). This implicitly defines the number of template levels' % ','.join([str(x) for x in DEFAULT_NL_SCALES])) + nloptions.add_argument('-nl_lmax', help='Specify the lmax used for non-linear registration for each scale factor, in the form of a list of integers (default: %s). The list must be the same length as the nl_scale factor list' % ','.join([str(x) for x in DEFAULT_NL_LMAX])) + nloptions.add_argument('-nl_niter', help='Specify the number of registration iterations used within each level before updating the template, in the form of a list of integers (default: %s). 
The list must be the same length as the nl_scale factor list' % ','.join([str(x) for x in DEFAULT_NL_NITER])) + nloptions.add_argument('-nl_update_smooth', default='2.0', help='Regularise the gradient update field with Gaussian smoothing (standard deviation in voxel units, Default 2.0 x voxel_size)') + nloptions.add_argument('-nl_disp_smooth', default='1.0', help='Regularise the displacement field with Gaussian smoothing (standard deviation in voxel units, Default 1.0 x voxel_size)') + nloptions.add_argument('-nl_grad_step', default='0.5', help='The gradient step size for non-linear registration (Default: 0.5)') + + options = cmdline.add_argument_group('Input, output and general options') + options.add_argument('-type', help='Specify the types of registration stages to perform. Options are "rigid" (perform rigid registration only which might be useful for intra-subject registration in longitudinal analysis), "affine" (perform affine registration) and "nonlinear" as well as cominations of registration types: %s. Default: rigid_affine_nonlinear' % ', '.join('"' + x + '"' for x in REGISTRATION_MODES if "_" in x), default='rigid_affine_nonlinear') + options.add_argument('-voxel_size', help='Define the template voxel size in mm. Use either a single value for isotropic voxels or 3 comma separated values.') + options.add_argument('-initial_alignment', default='mass', help='Method of alignment to form the initial template. Options are "mass" (default), "robust_mass" (requires masks), "geometric" and "none".') + options.add_argument('-mask_dir', help='Optionally input a set of masks inside a single directory, one per input image (with the same file name prefix). Using masks will speed up registration significantly. Note that masks are used for registration, not for aggregation. To exclude areas from aggregation, NaN-mask your input images.') + options.add_argument('-warp_dir', help='Output a directory containing warps from each input to the template. If the folder does not exist it will be created') + options.add_argument('-transformed_dir', help='Output a directory containing the input images transformed to the template. If the folder does not exist it will be created. For multi-contrast registration, provide comma separated list of directories.') + options.add_argument('-linear_transformations_dir', help='Output a directory containing the linear transformations used to generate the template. If the folder does not exist it will be created') + options.add_argument('-template_mask', help='Output a template mask. Only works if -mask_dir has been input. The template mask is computed as the intersection of all subject masks in template space.') + options.add_argument('-noreorientation', action='store_true', help='Turn off FOD reorientation in mrregister. Reorientation is on by default if the number of volumes in the 4th dimension corresponds to the number of coefficients in an antipodally symmetric spherical harmonic series (i.e. 6, 15, 28, 45, 66 etc)') + options.add_argument('-leave_one_out', help='Register each input image to a template that does not contain that image. Valid choices: 0, 1, auto. (Default: auto (true if n_subjects larger than 2 and smaller than 15)) ') + options.add_argument('-aggregate', help='Measure used to aggregate information from transformed images to the template image. Valid choices: %s. Default: mean' % ', '.join(AGGREGATION_MODES)) + options.add_argument('-aggregation_weights', help='Comma separated file containing weights used for weighted image aggregation. 
Each row must contain the identifiers of the input image and its weight. Note that this weighs intensity values not transformations (shape).') + options.add_argument('-nanmask', action='store_true', help='Optionally apply masks to (transformed) input images using NaN values to specify include areas for registration and aggregation. Only works if -mask_dir has been input.') + options.add_argument('-copy_input', action='store_true', help='Copy input images and masks into local scratch directory.') + +# ENH: add option to initialise warps / transformations # Binds raw_input() to input() in Python2, so that input() can be used # and the code will work on both Python 2 and 3 try: - input = raw_input #pylint: disable=redefined-builtin + input = raw_input #pylint: disable=redefined-builtin, invalid-name except NameError: pass -import math -from mrtrix3 import app, file, image, path, run #pylint: disable=redefined-builtin - - -from mrtrix3.path import allindir - def abspath(*arg): return os.path.abspath(os.path.join(*arg)) + def relpath(*arg): - return os.path.relpath(os.path.join(*arg), app.workingDir) + from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel + return os.path.relpath(os.path.join(*arg), app.WORKING_DIR) + +def copy(src, dst, follow_symlinks=True): + """Copy data but do not set mode bits. Return the file's destination. + + mimics shutil.copy but without setting mode bits as shutil.copymode can fail on exotic mounts + (observed on cifs with file_mode=0777). + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + if sys.version_info[0] > 2: + shutil.copyfile(src, dst, follow_symlinks=follow_symlinks) # pylint: disable=unexpected-keyword-arg + else: + shutil.copyfile(src, dst) + return dst -try: - from numpy import loadtxt, savetxt, dot -except ImportError: - app.console("numpy not found; using replacement functions") - def loadtxt(fname, delimiter=" ", dtype=float): - with open(fname, "rb") as f: - return [[dtype(a) for a in l.decode(errors='ignore').rstrip().split(delimiter)] for l in f.readlines()] - - def savetxt(fname, X, delimiter=" ", fmt="%.14e"): - try: - fh = open(fname, 'wb') - if len(X) == 1: - ncol = 1 - else: - ncol = len(X[0]) - fmt = [fmt, ] * ncol - row_format = delimiter.join(fmt) - for row in X: - fh.write(((row_format % tuple(row) + '\n').encode(errors='ignore'))) - finally: - fh.close() - - def dot(a, b): - # matrix dot product - # does not check input dimensions - return [[sum(x*y for x,y in zip(a_row,b_col)) for b_col in zip(*b)] for a_row in a] - -def check_linear_transformation (transformation, cmd, max_scaling = 0.5, max_shear = 0.2, max_rot = 2 * math.pi, pause_on_warn = True): - - def load_key_value(file_path): - res = {} - with open (file_path, "r") as f: - for line in f.readlines(): - if len(line)==1 or line.startswith("#"): - continue - name, var = line.rstrip().partition(":")[::2] - if name in res.keys(): - res[name].append(var.split()) - else: - res[name] = var.split() - return res - bGood = True - run.command ('transformcalc ' + transformation + ' decompose ' + transformation + 'decomp') - if not os.path.isfile(transformation + 'decomp'): # does not exist if run with -continue option +def check_linear_transformation(transformation, cmd, max_scaling=0.5, max_shear=0.2, max_rot=None, pause_on_warn=True): + from mrtrix3 import app, run, utils #pylint: disable=no-name-in-module, import-outside-toplevel + if max_rot is None: + max_rot = 2 * math.pi + + good = True + run.command('transformcalc ' + 
transformation + ' decompose ' + transformation + 'decomp') + if not os.path.isfile(transformation + 'decomp'): # does not exist if run with -continue option app.console(transformation + 'decomp not found. skipping check') return True - data = load_key_value(transformation + 'decomp') - run.function(remove, transformation + 'decomp') + data = utils.load_keyval(transformation + 'decomp') + run.function(os.remove, transformation + 'decomp') scaling = [float(value) for value in data['scaling']] - if any([a < 0 for a in scaling]) or any([a > (1 + max_scaling) for a in scaling]) or any([a < (1 - max_scaling) for a in scaling]): - app.warn ("large scaling (" + str(scaling) + ") in " + transformation) - bGood = False + if any([a < 0 for a in scaling]) or any([a > (1 + max_scaling) for a in scaling]) or any( + [a < (1 - max_scaling) for a in scaling]): + app.warn("large scaling (" + str(scaling) + ") in " + transformation) + good = False shear = [float(value) for value in data['shear']] if any([abs(a) > max_shear for a in shear]): - app.warn ("large shear (" + str(shear) + ") in " + transformation) - bGood = False + app.warn("large shear (" + str(shear) + ") in " + transformation) + good = False rot_angle = float(data['angle_axis'][0]) if abs(rot_angle) > max_rot: - app.warn ("large rotation (" + str(rot_angle) + ") in " + transformation) - bGood = False + app.warn("large rotation (" + str(rot_angle) + ") in " + transformation) + good = False - if not bGood: + if not good: newcmd = [] what = '' init_rotation_found = False skip = 0 - for e in cmd.split(): + for element in cmd.split(): if skip: skip -= 1 continue - if '_init_rotation' in e: + if '_init_rotation' in element: init_rotation_found = True - if '_init_matrix' in e: + if '_init_matrix' in element: skip = 1 continue - if 'affine_scale' in e: + if 'affine_scale' in element: assert what != 'rigid' what = 'affine' - elif 'rigid_scale' in e: + elif 'rigid_scale' in element: assert what != 'affine' what = 'rigid' - newcmd.append(e) - newcmd=" ".join(newcmd) + newcmd.append(element) + newcmd = " ".join(newcmd) if not init_rotation_found: app.console("replacing the transformation obtained with:") app.console(cmd) if what: - newcmd += ' -'+what+'_init_translation mass -'+what+'_init_rotation search' + newcmd += ' -' + what + '_init_translation mass -' + what + '_init_rotation search' app.console("by the one obtained with:") app.console(newcmd) - run.command(newcmd) - return check_linear_transformation (transformation, newcmd, max_scaling, max_shear, max_rot, pause_on_warn = pause_on_warn) + run.command(newcmd, force=True) + return check_linear_transformation(transformation, newcmd, max_scaling, max_shear, max_rot, pause_on_warn=pause_on_warn) if pause_on_warn: app.warn("you might want to manually repeat mrregister with different parameters and overwrite the transformation file: \n%s" % transformation) app.console('The command that failed the test was: \n' + cmd) app.console('Working directory: \n' + os.getcwd()) input("press enter to continue population_template") - return bGood - -class Input(object): - def __init__(self, filename, prefix, directory, mask_filename = '', mask_directory = ''): - self.filename = filename - self.prefix = prefix - self.directory = directory - self.mask_filename = mask_filename - self.mask_directory = mask_directory - -rigid_scales = [0.3,0.4,0.6,0.8,1.0,1.0] -rigid_lmax = [2,2,2,4,4,4] -affine_scales = [0.3,0.4,0.6,0.8,1.0,1.0] -affine_lmax = [2,2,2,4,4,4] - -nl_scales = 
[0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0] -nl_niter = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 ,5 ,5 ,5 ,5 ] -nl_lmax = [2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4 ,4 ,4 ,4 ,4 ] - -registration_modes = ["rigid","affine","nonlinear","rigid_affine","rigid_nonlinear","affine_nonlinear","rigid_affine_nonlinear"] - - -app.init('David Raffelt (david.raffelt@florey.edu.au) & Max Pietsch (maximilian.pietsch@kcl.ac.uk) & Thijs Dhollander (thijs.dhollander@gmail.com)', - 'Generates an unbiased group-average template from a series of images') -app.cmdline.addDescription('First a template is optimised with linear registration (rigid or affine, affine is default), then non-linear registration is used to optimise the template further.') -app.cmdline.add_argument('input_dir', help='The input directory containing all images used to build the template') -app.cmdline.add_argument('template', help='The output template image') - -linoptions = app.cmdline.add_argument_group('Options for the linear registration') -linoptions.add_argument('-linear_no_pause', action='store_true', help='Do not pause the script if a linear registration seems implausible') -linoptions.add_argument('-linear_estimator', help='Choose estimator for intensity difference metric. Valid choices are: l1 (least absolute: |x|), l2 (ordinary least squares), lp (least powers: |x|^1.2), Default: l2') -linoptions.add_argument('-rigid_scale', help='Specifiy the multi-resolution pyramid used to build the rigid template, in the form of a list of scale factors (default: %s). This and affine_scale implicitly define the number of template levels' % ','.join([str(x) for x in rigid_scales])) -linoptions.add_argument('-rigid_lmax', help='Specifiy the lmax used for rigid registration for each scale factor, in the form of a list of integers (default: %s). The list must be the same length as the linear_scale factor list' % ','.join([str(x) for x in rigid_lmax])) -linoptions.add_argument('-rigid_niter', help='Specifiy the number of registration iterations used within each level before updating the template, in the form of a list of integers (default:50 for each scale). This must be a single number or a list of same length as the linear_scale factor list') -linoptions.add_argument('-affine_scale', help='Specifiy the multi-resolution pyramid used to build the affine template, in the form of a list of scale factors (default: %s). This and rigid_scale implicitly define the number of template levels' % ','.join([str(x) for x in affine_scales])) -linoptions.add_argument('-affine_lmax', help='Specifiy the lmax used for affine registration for each scale factor, in the form of a list of integers (default: %s). The list must be the same length as the linear_scale factor list' % ','.join([str(x) for x in affine_lmax])) -linoptions.add_argument('-affine_niter', help='Specifiy the number of registration iterations used within each level before updating the template, in the form of a list of integers (default:500 for each scale). This must be a single number or a list of same length as the linear_scale factor list') - -nloptions = app.cmdline.add_argument_group('Options for the non-linear registration') -nloptions.add_argument('-nl_scale', help='Specifiy the multi-resolution pyramid used to build the non-linear template, in the form of a list of scale factors (default: %s). 
This implicitly defines the number of template levels' % ','.join([str(x) for x in nl_scales])) -nloptions.add_argument('-nl_lmax', help='Specifiy the lmax used for non-linear registration for each scale factor, in the form of a list of integers (default: %s). The list must be the same length as the nl_scale factor list' % ','.join([str(x) for x in nl_lmax])) -nloptions.add_argument('-nl_niter', help='Specifiy the number of registration iterations used within each level before updating the template, in the form of a list of integers (default: %s). The list must be the same length as the nl_scale factor list' % ','.join([str(x) for x in nl_niter])) -nloptions.add_argument('-nl_update_smooth', default='2.0', help='Regularise the gradient update field with Gaussian smoothing (standard deviation in voxel units, Default 2.0 x voxel_size)') -nloptions.add_argument('-nl_disp_smooth', default='1.0', help='Regularise the displacement field with Gaussian smoothing (standard deviation in voxel units, Default 1.0 x voxel_size)') -nloptions.add_argument('-nl_grad_step', default='0.5', help='The gradient step size for non-linear registration (Default: 0.5)') - -options = app.cmdline.add_argument_group('Input, output and general options') -options.add_argument('-type', help='Specifiy the types of registration stages to perform. Options are "rigid" (perform rigid registration only which might be useful for intra-subject registration in longitudinal analysis), "affine" (perform affine registration) and "nonlinear" as well as cominations of registration types: %s. Default: rigid_affine_nonlinear' % ', '.join('"'+x+'"' for x in registration_modes if "_" in x), default='rigid_affine_nonlinear') -options.add_argument('-voxel_size', help='Define the template voxel size in mm. Use either a single value for isotropic voxels or 3 comma separated values.') -options.add_argument('-initial_alignment', default='mass', help='Method of alignment to form the initial template. Options are "mass" (default), "geometric" and "none".') -options.add_argument('-mask_dir', help='Optionally input a set of masks inside a single directory, one per input image (with the same file name prefix). Using masks will speed up registration significantly') -options.add_argument('-warp_dir', help='Output a directory containing warps from each input to the template. If the folder does not exist it will be created') -options.add_argument('-transformed_dir', help='Output a directory containing the input images transformed to the template. If the folder does not exist it will be created') -options.add_argument('-linear_transformations_dir', help='Output a directory containing the linear transformations used to generate the template. If the folder does not exist it will be created') -options.add_argument('-template_mask', help='Output a template mask. Only works in -mask_dir has been input. The template mask is computed as the intersection of all subject masks in template space.') -options.add_argument('-noreorientation', action='store_true', help='Turn off FOD reorientation in mrregister. Reorientation is on by default if the number of volumes in the 4th dimension corresponds to the number of coefficients in an antipodally symmetric spherical harmonic series (i.e. 6, 15, 28, 45, 66 etc)') - -app.parse() - -if not app.args.type in registration_modes: - app.error("registration type must be one of %s. 
provided: %s" % (str(["rigid", "affine", "rigid_affine"]), app.args.type)) -dorigid = "rigid" in app.args.type -doaffine = "affine" in app.args.type -dolinear = dorigid or doaffine -dononlinear = "nonlinear" in app.args.type -assert (dorigid + doaffine + dononlinear >= 1), "FIXME: registration type not valid" - -app.args.input_dir = relpath(app.args.input_dir) -inputDir = app.args.input_dir -if not os.path.exists(inputDir): - app.error('input directory not found') -inFiles = sorted(allindir(inputDir, dir_path=False)) -if len(inFiles) <= 1: - app.error('Not enough images found in input directory. More than one image is needed to generate a population template') -else: - app.console('Generating a population-average template from ' + str(len(inFiles)) + ' input images') - -voxel_size = None -if app.args.voxel_size: - voxel_size = app.args.voxel_size.split(',') - if len(voxel_size) == 1: - voxel_size = voxel_size * 3 - try: - assert len(voxel_size) == 3 - [float(v) for v in voxel_size] #pylint: disable=expression-not-assigned - except: - app.error('voxel size needs to be a single or three comma-separated floating point numbers; received: ' + str(app.args.voxel_size)) - -initial_alignment = app.args.initial_alignment -if initial_alignment not in ["mass", "geometric", "none"]: - app.error('initial_alignment must be one of ' + " ".join(["mass", "geometric", "none"])) - -linear_estimator = app.args.linear_estimator -if linear_estimator: - if not dononlinear: - app.error('linear_estimator specified when no linear registration is requested') - if linear_estimator not in ["l1", "l2", "lp"]: - app.error('linear_estimator must be one of ' + " ".join(["l1", "l2", "lp"])) - -useMasks = False -if app.args.mask_dir: - useMasks = True - app.args.mask_dir = relpath(app.args.mask_dir) - maskDir = app.args.mask_dir - if not os.path.exists(maskDir): - app.error('mask directory not found') - maskFiles = allindir(maskDir, dir_path=False) - if len(maskFiles) < len(inFiles): - app.error('there are not enough mask images for the number of images in the input directory') - maskCommonPostfix = path.commonPostfix(maskFiles) - maskPrefixes = [] - for m in maskFiles: - maskPrefixes.append(m.split(maskCommonPostfix)[0]) - -if not useMasks: - app.warn('no masks input. 
Use input masks to reduce computation time and improve robustness') - -if app.args.template_mask and not useMasks: - app.error('you cannot output a template mask because no subject masks were input using -mask_dir') - -commonPostfix = path.commonPostfix(inFiles) -inputs = [] -for i in inFiles: - image.check3DNonunity(os.path.join(path.fromUser(inputDir, False), i)) - subj_prefix = i.split(commonPostfix)[0] - if useMasks: - if subj_prefix not in maskPrefixes: - app.error ('no matching mask image was found for input image ' + i) - index = maskPrefixes.index(subj_prefix) - inputs.append(Input(i, subj_prefix, path.fromUser(inputDir, False), maskFiles[index], path.fromUser(maskDir, False))) + return good + + +def aggregate(inputs, output, contrast_idx, mode, force=True): + from mrtrix3 import MRtrixError, run # pylint: disable=no-name-in-module, import-outside-toplevel + + images = [inp.ims_transformed[contrast_idx] for inp in inputs] + if mode == 'mean': + run.command(['mrmath', images, 'mean', '-keep_unary_axes', output], force=force) + elif mode == 'median': + run.command(['mrmath', images, 'median', '-keep_unary_axes', output], force=force) + elif mode == 'weighted_mean': + weights = [inp.aggregation_weight for inp in inputs] + assert not any([w is None for w in weights]), weights + wsum = sum([float(w) for w in weights]) + cmd = ['mrcalc'] + if wsum <= 0: + raise MRtrixError("the sum of aggregetion weights has to be positive") + for weight, image in zip(weights, images): + if float(weight) != 0: + cmd += [image, weight, '-mult'] + (['-add'] if len(cmd) > 1 else []) + cmd += ['%.16f' % wsum, '-div', output] + run.command(cmd, force=force) else: - inputs.append(Input(i, subj_prefix, path.fromUser(inputDir, False))) + raise MRtrixError("aggregation mode %s not understood" % mode) + + +def inplace_nan_mask(images, masks): + from mrtrix3 import run # pylint: disable=no-name-in-module, import-outside-toplevel + assert len(images) == len(masks), (len(images), len(masks)) + for image, mask in zip(images, masks): + target_dir = os.path.split(image)[0] + masked = os.path.join(target_dir, '__' + os.path.split(image)[1]) + run.command("mrcalc " + mask + " " + image + " nan -if " + masked, force=True) + run.function(shutil.move, masked, image) + + +def calculate_isfinite(inputs, contrasts): + from mrtrix3 import run, path # pylint: disable=no-name-in-module, import-outside-toplevel + agg_weights = [float(inp.aggregation_weight) for inp in inputs if inp.aggregation_weight is not None] + for cid in range(contrasts.n_contrasts): + for inp in inputs: + if contrasts.n_volumes[cid] > 0: + cmd = 'mrconvert ' + inp.ims_transformed[cid] + ' -coord 3 0 - | mrcalc - -finite' + else: + cmd = 'mrcalc ' + inp.ims_transformed[cid] + ' -finite' + if inp.aggregation_weight: + cmd += ' %s -mult ' % inp.aggregation_weight + cmd += ' isfinite%s/%s.mif' % (contrasts.suff[cid], inp.uid) + run.command(cmd, force=True) + for cid in range(contrasts.n_contrasts): + cmd = ['mrmath', path.all_in_dir('isfinite%s' % contrasts.suff[cid]), 'sum'] + if agg_weights: + agg_weight_norm = str(float(len(agg_weights)) / sum(agg_weights)) + cmd += ['-', '|', 'mrcalc', '-', agg_weight_norm, '-mult'] + run.command(cmd + [contrasts.isfinite_count[cid]], force=True) -noreorientation = app.args.noreorientation +def get_common_postfix(file_list): + return os.path.commonprefix([i[::-1] for i in file_list])[::-1] -do_pause_on_warn = True -if app.args.linear_no_pause: - do_pause_on_warn = False - if not dolinear: - app.error("linear option set when 
no linear registration is performed") -app.args.template = relpath(app.args.template) -app.checkOutputPath(app.args.template) +def get_common_prefix(file_list): + return os.path.commonprefix(file_list) -if app.args.warp_dir: - app.args.warp_dir = relpath(app.args.warp_dir) - app.checkOutputPath(app.args.warp_dir) -if app.args.transformed_dir: - app.args.transformed_dir = relpath(app.args.transformed_dir) - app.checkOutputPath(app.args.transformed_dir) +class Contrasts(object): + """ + Class that parses arguments and holds information specific to each image contrast -if app.args.linear_transformations_dir: - if not dolinear: - app.error("linear option set when no linear registration is performed") - app.args.linear_transformations_dir = relpath(app.args.linear_transformations_dir) - app.checkOutputPath(app.args.linear_transformations_dir) - - -# automatically detect SH series -do_fod_registration = False -image_size = image.Header(relpath(inputs[0].directory, inputs[0].filename)).size() -if len(image_size) < 3 or len(image_size) > 4: - app.error('only 3 and 4 dimensional images can be used to build a template') -if len(image_size) == 4: - val = (math.sqrt (1 + 8 * image_size[3]) - 3.0) / 4.0 - if not (val - int(val)) and not noreorientation: - app.console("SH series detected, performing FOD registration") - do_fod_registration = True - -#rigid options -if app.args.rigid_scale: - rigid_scales = [float(x) for x in app.args.rigid_scale.split(',')] - if not dorigid: - app.error("rigid_scales option set when no rigid registration is performed") -if app.args.rigid_lmax: - if not dorigid: - app.error("rigid_lmax option set when no rigid registration is performed") - rigid_lmax = [int(x) for x in app.args.rigid_lmax.split(',')] - if do_fod_registration and len(rigid_scales) != len(rigid_lmax): - app.error('rigid_scales and rigid_lmax schedules are not equal in length') - -rigid_niter = [100] * len(rigid_scales) -if app.args.rigid_niter: - if not dorigid: - app.error("rigid_niter specified when no rigid registration is performed") - rigid_niter = [int(x) for x in app.args.rigid_niter.split(',')] - if len(rigid_niter) == 1: - rigid_niter = rigid_niter * len(rigid_scales) - elif len(rigid_scales) != len(rigid_niter): - app.error('rigid_scales and rigid_niter schedules are not equal in length') - -# affine options -if app.args.affine_scale: - affine_scales = [float(x) for x in app.args.affine_scale.split(',')] - if not doaffine: - app.error("affine_scale option set when no affine registration is performed") -if app.args.affine_lmax: - if not doaffine: - app.error("affine_lmax option set when no affine registration is performed") - affine_lmax = [int(x) for x in app.args.affine_lmax.split(',')] - if do_fod_registration and len(affine_scales) != len(affine_lmax): - app.error('affine_scales and affine_lmax schedules are not equal in length') - -affine_niter = [500] * len(affine_scales) -if app.args.affine_niter: - if not doaffine: - app.error("affine_niter specified when no affine registration is performed") - affine_niter = [int(x) for x in app.args.affine_niter.split(',')] - if len(affine_niter) == 1: - affine_niter = affine_niter * len(affine_scales) - elif len(affine_scales) != len(affine_niter): - app.error('affine_scales and affine_niter schedules are not equal in length') - -linear_scales = [] -linear_lmax = [] -linear_niter = [] -linear_type = [] -if dorigid: - linear_scales += rigid_scales - linear_lmax += rigid_lmax - linear_niter += rigid_niter - linear_type += ['rigid'] * len(rigid_scales) - 
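# The original script (in the removed lines above) detects a spherical-harmonic (FOD)
# series purely from the volume count: an antipodally symmetric SH series of even order
# lmax = 2k holds (lmax+1)(lmax+2)/2 = 2k^2 + 3k + 1 volumes, so 8n + 1 = (4k+3)^2 and
# n is a valid count exactly when (sqrt(1 + 8n) - 3) / 4 is an integer. The rewritten
# script delegates this test to image.Header.is_sh() instead. Illustrative sketch of
# the volume-count check (not part of the patch):
import math

def looks_like_sh_series(n_volumes):
  # valid counts are 1, 6, 15, 28, 45, 66, ... (lmax = 0, 2, 4, 6, 8, 10, ...)
  val = (math.sqrt(1 + 8 * n_volumes) - 3.0) / 4.0
  return val == int(val)

assert [n for n in range(1, 70) if looks_like_sh_series(n)] == [1, 6, 15, 28, 45, 66]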
-if doaffine: - linear_scales += affine_scales - linear_lmax += affine_lmax - linear_niter += affine_niter - linear_type += ['affine'] * len(affine_scales) - -assert len(linear_type) == len(linear_scales) -assert len(linear_scales) == len(linear_niter) -if do_fod_registration: - assert len(linear_lmax) == len(linear_niter) - -app.console('initial alignment of images: %s' % initial_alignment) - -if dolinear: - app.console('linear registration stages:') + Attributes + ---------- + suff: list of str + identifiers used for contrast-specific filenames and folders ['_c0', '_c1', ...] + + names: list of str + derived from constrast-specific input folder + + templates_out: list of str + full path to output templates + + templates: list of str + holds current template names during registration + + n_volumes: list of int + number of volumes in each contrast + + fod_reorientation: list of bool + whether to perform FOD reorientation with mrtransform + + isfinite_count: list of str + filenames of images holding (weighted) number of finite-valued voxels across all images + + mc_weight_: list of str + contrast-specific weight used during initialisation / registration + + _weight_option: list of str + weight option to be passed to mrregister, = {'initial_alignment', 'rigid', 'affine', 'nl'} + + n_contrasts: int + + """ + + def __init__(self): + from mrtrix3 import MRtrixError, path, app # pylint: disable=no-name-in-module, import-outside-toplevel + + n_contrasts = len(app.ARGS.input_dir) + + self.suff = ["_c" + c for c in map(str, range(n_contrasts))] + self.names = [os.path.relpath(f, os.path.commonprefix(app.ARGS.input_dir)) for f in app.ARGS.input_dir] + + self.templates_out = [path.from_user(t, True) for t in app.ARGS.template] + + self.mc_weight_initial_alignment = [None for _ in range(self.n_contrasts)] + self.mc_weight_rigid = [None for _ in range(self.n_contrasts)] + self.mc_weight_affine = [None for _ in range(self.n_contrasts)] + self.mc_weight_nl = [None for _ in range(self.n_contrasts)] + self.initial_alignment_weight_option = [None for _ in range(self.n_contrasts)] + self.rigid_weight_option = [None for _ in range(self.n_contrasts)] + self.affine_weight_option = [None for _ in range(self.n_contrasts)] + self.nl_weight_option = [None for _ in range(self.n_contrasts)] + + self.isfinite_count = ['isfinite' + c + '.mif' for c in self.suff] + self.templates = [None for _ in range(self.n_contrasts)] + self.n_volumes = [None for _ in range(self.n_contrasts)] + self.fod_reorientation = [None for _ in range(self.n_contrasts)] + + + for mode in ['initial_alignment', 'rigid', 'affine', 'nl']: + opt = app.ARGS.__dict__.get('mc_weight_' + mode, None) + if opt: + if n_contrasts == 1: + raise MRtrixError('mc_weight_' + mode+' requires multiple input contrasts') + opt = opt.split(',') + if len(opt) != n_contrasts: + raise MRtrixError('mc_weight_' + mode+' needs to be defined for each contrast') + else: + opt = ["1"] * n_contrasts + self.__dict__['mc_weight_%s' % mode] = opt + self.__dict__['%s_weight_option' % mode] = ' -mc_weights '+','.join(opt)+' ' if n_contrasts > 1 else '' + + if len(self.templates_out) != n_contrasts: + raise MRtrixError('number of templates (%i) does not match number of input directories (%i)' % + (len(self.templates_out), n_contrasts)) + + @property + def n_contrasts(self): + return len(self.suff) + + def __repr__(self, *args, **kwargs): + text = '' + for cid in range(self.n_contrasts): + text += '\tcontrast: %s, template: %s, suffix: %s\n' % (self.names[cid], 
self.templates_out[cid], self.suff[cid]) + return text + + +class Input(object): + """ + Class that holds input information specific to a single image (multiple contrasts) + + Attributes + ---------- + uid: str + unique identifier for these input image(s), does not contain spaces + + ims_path: list of str + full path to input images, shell quoted OR paths to cached file if cache_local was called + + msk_path: str + full path to input mask, shell quoted OR path to cached file if cache_local was called + + ims_filenames : list of str + for each contrast the input file paths stripped of their respective directories. Used for final output only. + + msk_filename: str + as ims_filenames + + ims_transformed: list of str + input_transformed/.mif + + msk_transformed: list of str + mask_transformed/.mif + + aggregation_weight: float + weights used in image aggregation that forms the template. Has to be normalised across inputs. + + _im_directories : list of str + full path to user-provided input directories containing the input images, one for each contrast + + _msk_directory: str + full path to user-provided mask directory + + _local_ims: list of str + path to cached input images + + _local_msk: str + path to cached input mask + + Methods + ------- + cache_local() + copy files into folders in current working directory. modifies _local_ims and _local_msk + + """ + def __init__(self, uid, filenames, directories, contrasts, mask_filename='', mask_directory=''): + self.contrasts = contrasts + + self.uid = uid + assert self.uid, "UID empty" + assert self.uid.count(' ') == 0, 'UID contains whitespace' + + assert len(directories) == len(filenames) + self.ims_filenames = filenames + self._im_directories = directories + + self.msk_filename = mask_filename + self._msk_directory = mask_directory + + n_contrasts = len(contrasts) + + self.ims_transformed = [os.path.join('input_transformed'+contrasts[cid], uid + '.mif') for cid in range(n_contrasts)] + self.msk_transformed = os.path.join('mask_transformed', uid + '.mif') + + self.aggregation_weight = None + + self._local_ims = [] + self._local_msk = None + + def __repr__(self, *args, **kwargs): + text = '\nInput [' + for key in sorted([k for k in self.__dict__ if not k.startswith('_')]): + text += '\n\t' + str(key) + ': ' + str(self.__dict__[key]) + text += '\n]' + return text + + def info(self): + message = ['input: ' + self.uid] + if self.aggregation_weight: + message += ['agg weight: ' + self.aggregation_weight] + for csuff, fname in zip(self.contrasts, self.ims_filenames): + message += [((csuff + ': ') if csuff else '') + '"' + fname + '"'] + if self.msk_filename: + message += ['mask: ' + self.msk_filename] + return ', '.join(message) + + def cache_local(self): + from mrtrix3 import run, path # pylint: disable=no-name-in-module, import-outside-toplevel + contrasts = self.contrasts + for cid, csuff in enumerate(contrasts): + if not os.path.isdir('input' + csuff): + path.make_dir('input' + csuff) + run.command('mrconvert ' + self.ims_path[cid] + ' ' + os.path.join('input' + csuff, self.uid + '.mif')) + self._local_ims = [os.path.join('input' + csuff, self.uid + '.mif') for csuff in contrasts] + if self.msk_filename: + if not os.path.isdir('mask'): + path.make_dir('mask') + run.command('mrconvert ' + self.msk_path + ' ' + os.path.join('mask', self.uid + '.mif')) + self._local_msk = os.path.join('mask', self.uid + '.mif') + + def get_ims_path(self, quoted=True): + """ return path to input images """ + from mrtrix3 import path # pylint: disable=no-name-in-module, 
import-outside-toplevel + if self._local_ims: + return self._local_ims + return [path.from_user(abspath(d, f), quoted) for d, f in zip(self._im_directories, self.ims_filenames)] + ims_path = property(get_ims_path) + + def get_msk_path(self, quoted=True): + """ return path to input mask """ + from mrtrix3 import path # pylint: disable=no-name-in-module, import-outside-toplevel + if self._local_msk: + return self._local_msk + return path.from_user(os.path.join(self._msk_directory, self.msk_filename), quoted) if self.msk_filename else None + msk_path = property(get_msk_path) + + +def parse_input_files(in_files, mask_files, contrasts, f_agg_weight=None): + """ + matches input images across contrasts and pair them with masks. + extracts unique identifiers from mask and image filenames by stripping common pre and postfix (per contrast and for masks) + + in_files: list of lists + the inner list holds filenames specific to a contrast + + mask_files: + can be empty + + returns list of Input + + checks: 3d_nonunity + TODO check if no common grid & trafo across contrasts (only relevant for robust init?) + + """ + from mrtrix3 import MRtrixError, app, path, image # pylint: disable=no-name-in-module, import-outside-toplevel + contrasts = contrasts.suff + inputs = [] + # mask uids + mask_uids = [] + if mask_files: + mask_common_postfix = get_common_postfix(mask_files) + if not mask_common_postfix: + raise MRtrixError('mask filenames do not have a common postfix') + mask_common_prefix = get_common_prefix([os.path.split(m)[1] for m in mask_files]) + for mfile in mask_files: + mask_uids.append(re.sub(re.escape(mask_common_postfix)+'$', '', + re.sub('^'+re.escape(mask_common_prefix), '', os.path.split(mfile)[1]))) + if len(mask_uids) != len(set(mask_uids)): + non_unique = [uid for uid, count in collections.Counter(mask_uids).items() if count > 1] + MRtrixError("mask identifiers not unique:\n" + '""\n'.join(non_unique)) + if app.VERBOSITY > 1: + app.console('mask uids:' + str(mask_uids)) + + # images uids + common_postfix = [get_common_postfix(files) for files in in_files] + common_prefix = [get_common_prefix(files) for files in in_files] + # xcontrast_xsubject_pre_postfix: prefix and postfix of the common part across contrasts and subjects, + # without image extensions and leading or trailing '_' or '-' + xcontrast_xsubject_pre_postfix = [get_common_postfix(common_prefix).lstrip('_-'), + get_common_prefix([re.sub('.('+'|'.join(IMAGEEXT)+')(.gz)?$', '', pfix).rstrip('_-') for pfix in common_postfix])] + if app.VERBOSITY > 1: + app.console("common_postfix: " + str(common_postfix)) + app.console("common_prefix: " + str(common_prefix)) + app.console("xcontrast_xsubject_pre_postfix: " + str(xcontrast_xsubject_pre_postfix)) + for ipostfix, postfix in enumerate(common_postfix): + if not postfix: + raise MRtrixError('image filenames do not have a common postfix:\n' + '\n'.join(in_files[ipostfix])) + + c_uids = [] + for cid, files in enumerate(in_files): + c_uids.append([re.sub(re.escape(common_postfix[cid])+'$', '', + re.sub('^'+re.escape(common_prefix[cid]), '', fname)) for fname in files]) + if not all([len(_uid) for _uid in c_uids[-1]]): + MRtrixError("No uniquely identifiable part of filename found in file :\n" + '""\n'.join(non_unique)) + + for uids in c_uids: + if len(uids) != len(set(uids)): + non_unique = [uid for uid, count in collections.Counter(uids).items() if count > 1] + MRtrixError("image identifiers not unique:\n" + '""\n'.join(non_unique)) + if app.VERBOSITY > 1: + app.console('uids by contrast:' + 
str(c_uids)) + + # join images and masks + for ifile, fname in enumerate(in_files[0]): + uid = c_uids[0][ifile] + fnames = [fname] + dirs = [abspath(path.from_user(app.ARGS.input_dir[0], False))] + if len(contrasts) > 1: + for cid in range(1, len(contrasts)): + dirs.append(abspath(path.from_user(app.ARGS.input_dir[cid], False))) + image.check_3d_nonunity(os.path.join(dirs[cid], in_files[cid][ifile])) + if uid != c_uids[cid][ifile]: + raise MRtrixError('no matching image was found for image %s and contrasts %s and %s.' % (fname, dirs[0], dirs[cid])) + fnames.append(in_files[cid][ifile]) + + if mask_files: + if uid not in mask_uids: + raise MRtrixError('no matching mask image was found for input image ' + fname + ' with uid "'+uid+'". ' + 'Mask uid candidates: ' + ', '.join(['"%s"' % m for m in mask_uids])) + index = mask_uids.index(uid) + # uid, filenames, directories, contrasts, mask_filename = '', mask_directory = '', agg_weight = None + inputs.append(Input(uid, fnames, dirs, contrasts, + mask_filename=mask_files[index], mask_directory=abspath(path.from_user(app.ARGS.mask_dir, False)))) + else: + inputs.append(Input(uid, fnames, dirs, contrasts)) + + # parse aggregation weights and match to inputs + if f_agg_weight: + import csv # pylint: disable=import-outside-toplevel + try: + with open(f_agg_weight, 'r') as fweights: + agg_weights = dict((row[0].lstrip().rstrip(), row[1]) for row in csv.reader(fweights, delimiter=',', quotechar='#')) + except UnicodeDecodeError: + with open(f_agg_weight, 'r') as fweights: + reader = csv.reader(fweights.read().decode('utf-8', errors='replace'), delimiter=',', quotechar='#') + agg_weights = dict((row[0].lstrip().rstrip(), row[1]) for row in reader) + pref = '^' + re.escape(get_common_prefix(list(agg_weights.keys()))) + suff = re.escape(get_common_postfix(list(agg_weights.keys()))) + '$' + for key in agg_weights.keys(): + agg_weights[re.sub(suff, '', re.sub(pref, '', key))] = agg_weights.pop(key).strip() + + for inp in inputs: + if inp.uid not in agg_weights: + raise MRtrixError('aggregation weight not found for %s' % inp.uid) + inp.aggregation_weight = agg_weights[inp.uid] + app.console('Using aggregation weights ' + f_agg_weight) + weights = [float(inp.aggregation_weight) for inp in inputs if inp.aggregation_weight is not None] + if sum(weights) <= 0: + raise MRtrixError('Sum of aggregation weights is not positive: ' + str(weights)) + if any([w < 0 for w in weights]): + app.warn('Negative aggregation weights: ' + str(weights)) + + return inputs, xcontrast_xsubject_pre_postfix + + +def execute(): #pylint: disable=unused-variable + from mrtrix3 import MRtrixError, app, image, matrix, path, run #pylint: disable=no-name-in-module, import-outside-toplevel + + if not app.ARGS.type in REGISTRATION_MODES: + raise MRtrixError("registration type must be one of %s. 
provided: %s" % (str(REGISTRATION_MODES), app.ARGS.type)) + dorigid = "rigid" in app.ARGS.type + doaffine = "affine" in app.ARGS.type + dolinear = dorigid or doaffine + dononlinear = "nonlinear" in app.ARGS.type + assert (dorigid + doaffine + dononlinear >= 1), "FIXME: registration type not valid" + + + input_output = app.ARGS.input_dir + [app.ARGS.template] + n_contrasts = len(input_output) // 2 + if len(input_output) != 2 * n_contrasts: + raise MRtrixError('expected two arguments per contrast, received %i: %s' % (len(input_output), ', '.join(input_output))) + if n_contrasts > 1: + app.console('Generating population template using multi-contrast registration') + + # reorder arguments for multi-contrast registration as after command line parsing app.ARGS.input_dir holds all but one argument + app.ARGS.input_dir = [] + app.ARGS.template = [] + for i_contrast in range(n_contrasts): + inargs = (input_output[i_contrast*2], input_output[i_contrast*2+1]) + if not os.path.isdir(inargs[0]): + raise MRtrixError('input directory %s not found' % inargs[0]) + app.ARGS.input_dir.append(relpath(inargs[0])) + app.ARGS.template.append(relpath(inargs[1])) + + cns = Contrasts() + app.debug(str(cns)) + + in_files = [sorted(path.all_in_dir(input_dir, dir_path=False)) for input_dir in app.ARGS.input_dir] + if len(in_files[0]) <= 1: + raise MRtrixError('Not enough images found in input directory ' + app.ARGS.input_dir[0] + + '. More than one image is needed to generate a population template') + if n_contrasts > 1: + for cid in range(1, n_contrasts): + if len(in_files[cid]) != len(in_files[0]): + raise MRtrixError('Found %i images in input directory %s ' % (len(app.ARGS.input_dir[0]), app.ARGS.input_dir[0]) + + 'but %i input images in %s.' % (len(app.ARGS.input_dir[cid]), app.ARGS.input_dir[cid])) + else: + app.console('Generating a population-average template from ' + str(len(in_files[0])) + ' input images') + if n_contrasts > 1: + app.console('using ' + str(len(in_files)) + ' contrasts for each input image') + + voxel_size = None + if app.ARGS.voxel_size: + voxel_size = app.ARGS.voxel_size.split(',') + if len(voxel_size) == 1: + voxel_size = voxel_size * 3 + try: + if len(voxel_size) != 3: + raise ValueError + [float(v) for v in voxel_size] #pylint: disable=expression-not-assigned + except ValueError: + raise MRtrixError('voxel size needs to be a single or three comma-separated floating point numbers; received: ' + str(app.ARGS.voxel_size)) + + agg_measure = 'mean' + if app.ARGS.aggregate is not None: + if not app.ARGS.aggregate in AGGREGATION_MODES: + app.error("aggregation type must be one of %s. provided: %s" % (str(AGGREGATION_MODES), app.ARGS.aggregate)) + agg_measure = app.ARGS.aggregate + + agg_weights = app.ARGS.aggregation_weights + if agg_weights is not None: + agg_measure = "weighted_" + agg_measure + if agg_measure != 'weighted_mean': + app.error("aggregation weights require '-aggregate mean' option. 
provided: %s" % (app.ARGS.aggregate)) + if not os.path.isfile(app.ARGS.aggregation_weights): + app.error("aggregation weights file not found: %s" % app.ARGS.aggregation_weights) + + initial_alignment = app.ARGS.initial_alignment + if initial_alignment not in ["mass", "robust_mass", "geometric", "none"]: + raise MRtrixError('initial_alignment must be one of ' + " ".join(["mass", "robust_mass", "geometric", "none"]) + " provided: " + str(initial_alignment)) + + linear_estimator = app.ARGS.linear_estimator + if linear_estimator and not linear_estimator.lower() == 'none': + if not dolinear: + raise MRtrixError('linear_estimator specified when no linear registration is requested') + if linear_estimator not in ["l1", "l2", "lp"]: + raise MRtrixError('linear_estimator must be one of ' + " ".join(["l1", "l2", "lp"]) + " provided: " + str(linear_estimator)) + + use_masks = False + mask_files = [] + if app.ARGS.mask_dir: + use_masks = True + app.ARGS.mask_dir = relpath(app.ARGS.mask_dir) + if not os.path.isdir(app.ARGS.mask_dir): + raise MRtrixError('mask directory not found') + mask_files = sorted(path.all_in_dir(app.ARGS.mask_dir, dir_path=False)) + if len(mask_files) < len(in_files[0]): + raise MRtrixError('there are not enough mask images for the number of images in the input directory') + + if not use_masks: + app.warn('no masks input. Use input masks to reduce computation time and improve robustness') + + if app.ARGS.template_mask and not use_masks: + raise MRtrixError('you cannot output a template mask because no subject masks were input using -mask_dir') + + nanmask_input = app.ARGS.nanmask + if nanmask_input and not use_masks: + raise MRtrixError('you cannot use NaN masking when no subject masks were input using -mask_dir') + + ins, xcontrast_xsubject_pre_postfix = parse_input_files(in_files, mask_files, cns, agg_weights) + + leave_one_out = 'auto' + if app.ARGS.leave_one_out is not None: + leave_one_out = app.ARGS.leave_one_out + if not leave_one_out in ['0', '1', 'auto']: + raise MRtrixError('leave_one_out not understood: ' + str(leave_one_out)) + if leave_one_out == 'auto': + leave_one_out = 2 < len(ins) < 15 + else: + leave_one_out = bool(int(leave_one_out)) + if leave_one_out: + app.console('performing leave-one-out registration') + # check that at sum of weights is positive for any grouping if weighted aggregation is used + weights = [float(inp.aggregation_weight) for inp in ins if inp.aggregation_weight is not None] + if weights and sum(weights) - max(weights) <= 0: + raise MRtrixError('leave-one-out registration requires positive aggregation weights in all groupings') + + noreorientation = app.ARGS.noreorientation + + do_pause_on_warn = True + if app.ARGS.linear_no_pause: + do_pause_on_warn = False + if not dolinear: + raise MRtrixError("linear option set when no linear registration is performed") + + if len(app.ARGS.template) != n_contrasts: + raise MRtrixError('mismatch between number of output templates (%i) ' % len(app.ARGS.template) + + 'and number of contrasts (%i)' % n_contrasts) + for templ in app.ARGS.template: + app.check_output_path(templ) + + if app.ARGS.warp_dir: + app.ARGS.warp_dir = relpath(app.ARGS.warp_dir) + app.check_output_path(app.ARGS.warp_dir) + + if app.ARGS.transformed_dir: + app.ARGS.transformed_dir = [relpath(d) for d in app.ARGS.transformed_dir.split(',')] + if len(app.ARGS.transformed_dir) != n_contrasts: + raise MRtrixError('require multiple comma separated transformed directories if multi-contrast registration is used') + for tdir in 
app.ARGS.transformed_dir: + app.check_output_path(tdir) + + if app.ARGS.linear_transformations_dir: + if not dolinear: + raise MRtrixError("linear option set when no linear registration is performed") + app.ARGS.linear_transformations_dir = relpath(app.ARGS.linear_transformations_dir) + app.check_output_path(app.ARGS.linear_transformations_dir) + + # automatically detect SH series in each contrast + do_fod_registration = False # in any contrast + cns.n_volumes = [] + cns.fod_reorientation = [] + for cid in range(n_contrasts): + header = image.Header(ins[0].get_ims_path(False)[cid]) + image_size = header.size() + if len(image_size) < 3 or len(image_size) > 4: + raise MRtrixError('only 3 and 4 dimensional images can be used to build a template') + if len(image_size) == 4: + cns.fod_reorientation.append(header.is_sh() and not noreorientation) + cns.n_volumes.append(image_size[3]) + do_fod_registration = do_fod_registration or cns.fod_reorientation[-1] + else: + cns.fod_reorientation.append(False) + cns.n_volumes.append(0) if do_fod_registration: - for istage, [tpe, scale, lmax, niter] in enumerate(zip (linear_type, linear_scales, linear_lmax, linear_niter)): - app.console('(%02i) %s scale: %.4f, niter: %i, lmax: %i' %(istage, tpe.ljust(9), scale, niter, lmax)) + app.console("SH Series detected, performing FOD registration in contrast: " + + ', '.join(app.ARGS.input_dir[cid] for cid in range(n_contrasts) if cns.fod_reorientation[cid])) + c_mrtransform_reorientation = [' -reorient_fod ' + ('yes' if cns.fod_reorientation[cid] else 'no') + ' ' + for cid in range(n_contrasts)] + + if nanmask_input: + app.console("NaN masking transformed images") + + # rigid options + if app.ARGS.rigid_scale: + rigid_scales = [float(x) for x in app.ARGS.rigid_scale.split(',')] + if not dorigid: + raise MRtrixError("rigid_scales option set when no rigid registration is performed") + else: + rigid_scales = DEFAULT_RIGID_SCALES + if app.ARGS.rigid_lmax: + if not dorigid: + raise MRtrixError("rigid_lmax option set when no rigid registration is performed") + rigid_lmax = [int(x) for x in app.ARGS.rigid_lmax.split(',')] + if do_fod_registration and len(rigid_scales) != len(rigid_lmax): + raise MRtrixError('rigid_scales and rigid_lmax schedules are not equal in length') + else: + rigid_lmax = DEFAULT_RIGID_LMAX + + rigid_niter = [100] * len(rigid_scales) + if app.ARGS.rigid_niter: + if not dorigid: + raise MRtrixError("rigid_niter specified when no rigid registration is performed") + rigid_niter = [int(x) for x in app.ARGS.rigid_niter.split(',')] + if len(rigid_niter) == 1: + rigid_niter = rigid_niter * len(rigid_scales) + elif len(rigid_scales) != len(rigid_niter): + raise MRtrixError('rigid_scales and rigid_niter schedules are not equal in length') + + # affine options + if app.ARGS.affine_scale: + affine_scales = [float(x) for x in app.ARGS.affine_scale.split(',')] + if not doaffine: + raise MRtrixError("affine_scale option set when no affine registration is performed") else: - for istage, [tpe, scale, niter] in enumerate(zip (linear_type, linear_scales, linear_niter)): - app.console('(%02i) %s scale: %.4f, niter: %i, no reorientation' %(istage, tpe.ljust(9), scale, niter)) - -datatype_option = ' -datatype float32' - -if not dononlinear: - nl_scales = [] - nl_lmax = [] - nl_niter = [] - if app.args.warp_dir: - app.error('warp_dir specified when no nonlinear registration is performed') -else: - if app.args.nl_scale: - nl_scales = [float(x) for x in app.args.nl_scale.split(',')] - if app.args.nl_niter: - nl_niter = 
[int(x) for x in app.args.nl_niter.split(',')] - if app.args.nl_lmax: - nl_lmax = [int(x) for x in app.args.nl_lmax.split(',')] - - if len(nl_scales) != len(nl_niter): - app.error('nl_scales and nl_niter schedules are not equal in length') - - app.console('nonlinear registration stages:') + affine_scales = DEFAULT_AFFINE_SCALES + if app.ARGS.affine_lmax: + if not doaffine: + raise MRtrixError("affine_lmax option set when no affine registration is performed") + affine_lmax = [int(x) for x in app.ARGS.affine_lmax.split(',')] + if do_fod_registration and len(affine_scales) != len(affine_lmax): + raise MRtrixError('affine_scales and affine_lmax schedules are not equal in length') + else: + affine_lmax = DEFAULT_AFFINE_LMAX + + affine_niter = [500] * len(affine_scales) + if app.ARGS.affine_niter: + if not doaffine: + raise MRtrixError("affine_niter specified when no affine registration is performed") + affine_niter = [int(x) for x in app.ARGS.affine_niter.split(',')] + if len(affine_niter) == 1: + affine_niter = affine_niter * len(affine_scales) + elif len(affine_scales) != len(affine_niter): + raise MRtrixError('affine_scales and affine_niter schedules are not equal in length') + + linear_scales = [] + linear_lmax = [] + linear_niter = [] + linear_type = [] + if dorigid: + linear_scales += rigid_scales + linear_lmax += rigid_lmax + linear_niter += rigid_niter + linear_type += ['rigid'] * len(rigid_scales) + + if doaffine: + linear_scales += affine_scales + linear_lmax += affine_lmax + linear_niter += affine_niter + linear_type += ['affine'] * len(affine_scales) + + assert len(linear_type) == len(linear_scales) + assert len(linear_scales) == len(linear_niter) if do_fod_registration: - if len(nl_scales) != len(nl_lmax): - app.error('nl_scales and nl_lmax schedules are not equal in length') + assert len(linear_lmax) == len(linear_niter) + app.console('-' * 60) + app.console('initial alignment of images: %s' % initial_alignment) + app.console('-' * 60) + if n_contrasts > 1: + for cid in range(n_contrasts): + app.console('\tcontrast "%s": %s, ' % (cns.suff[cid], cns.names[cid]) + + 'objective weight: %s' % cns.mc_weight_initial_alignment[cid]) + + if dolinear: + app.console('-' * 60) + app.console('linear registration stages:') + app.console('-' * 60) + if n_contrasts > 1: + for cid in range(n_contrasts): + msg = '\tcontrast "%s": %s' % (cns.suff[cid], cns.names[cid]) + if 'rigid' in linear_type: + msg += ', objective weight rigid: %s' % cns.mc_weight_rigid[cid] + if 'affine' in linear_type: + msg += ', objective weight affine: %s' % cns.mc_weight_affine[cid] + app.console(msg) + + if do_fod_registration: + for istage, [tpe, scale, lmax, niter] in enumerate(zip(linear_type, linear_scales, linear_lmax, linear_niter)): + app.console('(%02i) %s scale: %.4f, niter: %i, lmax: %i' % (istage, tpe.ljust(9), scale, niter, lmax)) + else: + for istage, [tpe, scale, niter] in enumerate(zip(linear_type, linear_scales, linear_niter)): + app.console('(%02i) %s scale: %.4f, niter: %i, no reorientation' % (istage, tpe.ljust(9), scale, niter)) - if do_fod_registration: - for istage, [scale, lmax, niter] in enumerate(zip (nl_scales, nl_lmax, nl_niter)): - app.console('(%02i) nonlinear scale: %.4f, niter: %i, lmax: %i' %(istage, scale, niter, lmax)) + datatype_option = ' -datatype float32' + outofbounds_option = ' -nan' + + if not dononlinear: + nl_scales = [] + nl_lmax = [] + nl_niter = [] + if app.ARGS.warp_dir: + raise MRtrixError('warp_dir specified when no nonlinear registration is performed') else: - for istage, 
[scale, niter] in enumerate(zip (nl_scales, nl_niter)): - app.console('(%02i) nonlinear scale: %.4f, niter: %i, no reorientation' %(istage, scale, niter)) - -app.makeTempDir() -app.gotoTempDir() - -file.makeDir('inputs_transformed') -file.makeDir('linear_transforms_initial') -file.makeDir('linear_transforms') -for level in range(0, len(linear_scales)): - file.makeDir('linear_transforms_%i' % level) -for level in range(0, len(nl_scales)): - file.makeDir('warps_%i' % level) - -if useMasks: - file.makeDir('masks_transformed') -write_log = (app.verbosity >= 2) -if write_log: - file.makeDir('log') - -# Make initial template in average space -app.console('Generating initial template') -input_filenames = [] -for i in inputs: - input_filenames.append(abspath(i.directory, i.filename)) -if voxel_size is None: - run.command('mraverageheader ' + ' '.join(input_filenames) + ' average_header.mif -fill') -else: - run.command('mraverageheader -fill ' + ' '.join(input_filenames) + ' - | mrresize - -voxel '+','.join(voxel_size)+' average_header.mif') - -# crop average space to extent defined by original masks -if useMasks: - progress = app.progressBar('Importing input masks to average space for template cropping', len(inputs)) - mask_filenames = [] - for i in inputs: - run.command('mrtransform ' + abspath(i.mask_directory, i.mask_filename) + \ - ' -interp nearest -template average_header.mif ' + os.path.join('masks_transformed', i.mask_filename)) - mask_filenames.append(os.path.join('masks_transformed', i.mask_filename)) - progress.increment() - progress.done() - run.command('mrmath ' + ' '.join(mask_filenames) + ' max mask_initial.mif' ) - run.command('mrcrop ' + 'average_header.mif -mask mask_initial.mif average_header_cropped.mif') - run.function(remove,'mask_initial.mif') - run.function(remove, 'average_header.mif') - run.function(move, 'average_header_cropped.mif', 'average_header.mif') - progress = app.progressBar('Erasing temporary mask images', len(mask_filenames)) - for mask in mask_filenames: - run.function(remove, mask) - progress.increment() - progress.done() - - - -if initial_alignment == 'none': - progress = app.progressBar('Resampling input images to template space with no initial alignment', len(inputs)) - for i in inputs: - run.command('mrtransform ' + abspath(i.directory, i.filename) + ' -interp linear -template average_header.mif ' + os.path.join('inputs_transformed', i.prefix + '.mif') + datatype_option) - progress.increment() - progress.done() - if not dolinear: - for i in inputs: - with open(os.path.join('linear_transforms_initial','%s.txt' % (i.prefix)),'w') as fout: - fout.write('1 0 0 0\n0 1 0 0\n0 0 1 0\n0 0 0 1\n') -else: - progress = app.progressBar('Performing initial rigid registration to template', len(inputs)) - mask = '' - for i in inputs: - if useMasks: - mask = ' -mask1 ' + abspath(i.mask_directory, i.mask_filename) - output = ' -rigid ' + os.path.join('linear_transforms_initial', i.prefix + '.txt') - run.command('mrregister ' + abspath(i.directory, i.filename) + ' average_header.mif' + \ - mask + \ - ' -rigid_scale 1 ' + \ - ' -rigid_niter 0 ' + \ - ' -type rigid ' + \ - ' -noreorientation ' + \ - ' -rigid_init_translation ' + initial_alignment + ' ' + \ - datatype_option + \ - output) - # translate input images to centre of mass without interpolation - run.command('mrtransform ' + abspath(i.directory, i.filename) + \ - ' -linear ' + os.path.join('linear_transforms_initial', i.prefix + '.txt') + \ - datatype_option + \ - ' ' + os.path.join('inputs_transformed', i.prefix + 
'_translated.mif')) - if useMasks: - run.command('mrtransform ' + abspath(i.mask_directory, i.mask_filename) + \ - ' -linear ' + os.path.join('linear_transforms_initial', i.prefix + '.txt') + \ - datatype_option + ' ' + \ - os.path.join('masks_transformed', i.prefix + '_translated.mif')) - progress.increment() - progress.done() - # update average space to new extent - run.command('mraverageheader ' + ' '.join([os.path.join('inputs_transformed', i.prefix + '_translated.mif') for i in inputs]) + ' average_header_tight.mif') + nl_scales = [float(x) for x in app.ARGS.nl_scale.split(',')] if app.ARGS.nl_scale else DEFAULT_NL_SCALES + nl_niter = [int(x) for x in app.ARGS.nl_niter.split(',')] if app.ARGS.nl_niter else DEFAULT_NL_NITER + nl_lmax = [int(x) for x in app.ARGS.nl_lmax.split(',')] if app.ARGS.nl_lmax else DEFAULT_NL_LMAX + + if len(nl_scales) != len(nl_niter): + raise MRtrixError('nl_scales and nl_niter schedules are not equal in length') + + app.console('-' * 60) + app.console('nonlinear registration stages:') + app.console('-' * 60) + if n_contrasts > 1: + for cid in range(n_contrasts): + app.console('\tcontrast "%s": %s, objective weight: %s' % (cns.suff[cid], cns.names[cid], cns.mc_weight_nl[cid])) + + if do_fod_registration: + if len(nl_scales) != len(nl_lmax): + raise MRtrixError('nl_scales and nl_lmax schedules are not equal in length') + + if do_fod_registration: + for istage, [scale, lmax, niter] in enumerate(zip(nl_scales, nl_lmax, nl_niter)): + app.console('(%02i) nonlinear scale: %.4f, niter: %i, lmax: %i' % (istage, scale, niter, lmax)) + else: + for istage, [scale, niter] in enumerate(zip(nl_scales, nl_niter)): + app.console('(%02i) nonlinear scale: %.4f, niter: %i, no reorientation' % (istage, scale, niter)) + + app.console('-' * 60) + app.console('input images:') + app.console('-' * 60) + for inp in ins: + app.console('\t' + inp.info()) + + app.make_scratch_dir() + app.goto_scratch_dir() + + for contrast in cns.suff: + path.make_dir('input_transformed' + contrast) + + for contrast in cns.suff: + path.make_dir('isfinite' + contrast) + + path.make_dir('linear_transforms_initial') + path.make_dir('linear_transforms') + for level in range(0, len(linear_scales)): + path.make_dir('linear_transforms_%02i' % level) + for level in range(0, len(nl_scales)): + path.make_dir('warps_%02i' % level) + + if use_masks: + path.make_dir('mask_transformed') + write_log = (app.VERBOSITY >= 2) + if write_log: + path.make_dir('log') + + if initial_alignment == 'robust_mass': + if not use_masks: + raise MRtrixError('robust_mass initial alignment requires masks') + path.make_dir('robust') + + if app.ARGS.copy_input: + app.console('Copying images into scratch directory') + for inp in ins: + inp.cache_local() + + # Make initial template in average space using first contrast + app.console('Generating initial template') + input_filenames = [inp.get_ims_path(False)[0] for inp in ins] if voxel_size is None: - run.command('mrpad -uniform 10 average_header_tight.mif average_header.mif -force') + run.command(['mraverageheader', input_filenames, 'average_header.mif', '-fill']) else: - run.command('mrpad -uniform 10 average_header_tight.mif - | mrresize - -voxel '+','.join(voxel_size)+' average_header.mif -force') - run.function(remove, 'average_header_tight.mif') - if useMasks: - # reslice masks - progress = app.progressBar('Reslicing input masks to average header', len(inputs)) - for i in inputs: - run.command('mrtransform ' + \ - os.path.join('masks_transformed', i.prefix + '_translated.mif') + ' ' + \ 
- os.path.join('masks_transformed', i.prefix + '.mif') + ' ' + \ - '-interp nearest -template average_header.mif' + \ - datatype_option) + run.command(['mraverageheader', '-fill', input_filenames, '-', '|', + 'mrgrid', '-', 'regrid', '-voxel', ','.join(map(str, voxel_size)), 'average_header.mif']) + + # crop average space to extent defined by original masks + if use_masks: + progress = app.ProgressBar('Importing input masks to average space for template cropping', len(ins)) + for inp in ins: + run.command('mrtransform ' + inp.msk_path + ' -interp nearest -template average_header.mif ' + inp.msk_transformed) progress.increment() progress.done() - # crop average space to extent defined by translated masks - mask_filenames = [] - for i in inputs: - mask_filenames.append(os.path.join('masks_transformed', i.prefix + '.mif')) - run.command('mrmath ' + ' '.join(mask_filenames) + ' max mask_translated.mif' ) - run.command('mrcrop ' + 'average_header.mif -mask mask_translated.mif average_header_cropped.mif') - # pad average space to allow for deviation from initial alignment - run.command('mrpad -uniform 10 average_header_cropped.mif -force average_header.mif') - run.function(remove, 'average_header_cropped.mif') - progress = app.progressBar('Reslicing mask images to new padded average header', len(inputs)) - for i in inputs: - run.command('mrtransform ' + - os.path.join('masks_transformed', i.prefix + '_translated.mif') + ' ' + \ - os.path.join('masks_transformed', i.prefix + '.mif') + ' ' + \ - '-interp nearest -template average_header.mif' + \ - datatype_option + ' ' + \ - '-force') - run.function(remove, os.path.join('masks_transformed', i.prefix + '_translated.mif')) + run.command(['mrmath', [inp.msk_transformed for inp in ins], 'max', 'mask_initial.mif']) + run.command('mrgrid average_header.mif crop -mask mask_initial.mif average_header_cropped.mif') + run.function(os.remove, 'mask_initial.mif') + run.function(os.remove, 'average_header.mif') + run.function(shutil.move, 'average_header_cropped.mif', 'average_header.mif') + progress = app.ProgressBar('Erasing temporary mask images', len(ins)) + for inp in ins: + run.function(os.remove, inp.msk_transformed) progress.increment() progress.done() - run.function(remove, 'mask_translated.mif') - # reslice input images - progress = app.progressBar('Reslicing input images to average header', len(inputs)) - for i in inputs: - run.command('mrtransform ' + \ - os.path.join('inputs_transformed', i.prefix + '_translated.mif') + ' ' + \ - os.path.join('inputs_transformed', i.prefix + '.mif') + ' ' + \ - '-interp linear -template average_header.mif' + \ - datatype_option) - run.function(remove, os.path.join('inputs_transformed', i.prefix + '_translated.mif')) - progress.increment() - progress.done() - - -run.command('mrmath ' + ' '.join(allindir('inputs_transformed')) + ' mean initial_template.mif') -current_template = 'initial_template.mif' - - -# Optimise template with linear registration -if not dolinear: - for i in inputs: - run.function(copy, os.path.join('linear_transforms_initial','%s.txt' % (i.prefix)), os.path.join('linear_transforms','%s.txt' % (i.prefix))) -else: - for level, (regtype, scale, niter, lmax) in enumerate(zip(linear_type, linear_scales, linear_niter, linear_lmax)): - progress = app.progressBar('Optimising template with linear registration (stage {0} of {1})'.format(level+1, len(linear_scales)), len(inputs)) - for i in inputs: - initialise_option = '' - if useMasks: - mask_option = ' -mask1 ' + abspath(i.mask_directory, 
i.mask_filename) - else: - mask_option = '' - lmax_option = '' - metric_option = '' - mrregister_log_option = '' - if regtype == 'rigid': - scale_option = ' -rigid_scale ' + str(scale) - niter_option = ' -rigid_niter ' + str(niter) - regtype_option = ' -type rigid' - output_option = ' -rigid ' + os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix) - if level > 0: - initialise_option = ' -rigid_init_matrix ' + os.path.join('linear_transforms_%i' % (level - 1), '%s.txt' % i.prefix) - if do_fod_registration: - lmax_option = ' -rigid_lmax ' + str(lmax) - else: - lmax_option = ' -noreorientation' - if linear_estimator: - metric_option = ' -rigid_metric.diff.estimator ' + linear_estimator - if app.verbosity >= 2: - mrregister_log_option = ' -info -rigid_log ' + os.path.join('log', i.filename + "_" + str(level) + '.log') + + # create average space headers for other contrasts + if n_contrasts > 1: + avh3d = 'average_header3d.mif' + avh4d = 'average_header4d.mif' + if len(image.Header('average_header.mif').size()) == 3: + run.command('mrconvert average_header.mif ' + avh3d) + else: + run.command('mrconvert average_header.mif -coord 3 0 -axes 0,1,2 ' + avh3d) + run.command('mrconvert ' + avh3d + ' -axes 0,1,2,-1 ' + avh4d) + for cid in range(n_contrasts): + if cns.n_volumes[cid] == 0: + run.function(copy, avh3d, 'average_header' + cns.suff[cid] + '.mif') + elif cns.n_volumes[cid] == 1: + run.function(copy, avh4d, 'average_header' + cns.suff[cid] + '.mif') else: - scale_option = ' -affine_scale ' + str(scale) - niter_option = ' -affine_niter ' + str(niter) - regtype_option = ' -type affine' - output_option = ' -affine ' + os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix) - if level > 0: - initialise_option = ' -affine_init_matrix ' + os.path.join('linear_transforms_%i' % (level - 1), '%s.txt' % i.prefix) - if do_fod_registration: - lmax_option = ' -affine_lmax ' + str(lmax) - else: - lmax_option = ' -noreorientation' - if linear_estimator: - metric_option = ' -affine_metric.diff.estimator ' + linear_estimator - if write_log: - mrregister_log_option = ' -info -affine_log ' + os.path.join('log', i.filename + '_' + str(level) + '.log') - - command = 'mrregister ' + abspath(i.directory, i.filename) + ' ' + current_template + \ - ' -force' + \ - initialise_option + \ - mask_option + \ - scale_option + \ - niter_option + \ - lmax_option + \ - regtype_option + \ - metric_option + \ - datatype_option + \ - output_option + \ - mrregister_log_option - run.command(command) - check_linear_transformation(os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix), command, pause_on_warn=do_pause_on_warn) - progress.increment() + run.command('mrcat ' + ' '.join([avh3d] * cns.n_volumes[cid]) + ' -axis 3 average_header' + cns.suff[cid] + '.mif') + run.function(os.remove, avh3d) + run.function(os.remove, avh4d) + else: + run.function(shutil.move, 'average_header.mif', 'average_header' + cns.suff[0] + '.mif') + + cns.templates = ['average_header' + csuff + '.mif' for csuff in cns.suff] + + if initial_alignment == 'none': + progress = app.ProgressBar('Resampling input images to template space with no initial alignment', len(ins) * n_contrasts) + for inp in ins: + for cid in range(n_contrasts): + run.command('mrtransform ' + inp.ims_path[cid] + c_mrtransform_reorientation[cid] + ' -interp linear ' + + '-template ' + cns.templates[cid] + ' ' + inp.ims_transformed[cid] + + outofbounds_option + + datatype_option) + progress.increment() progress.done() - # Here we ensure the template 
doesn't drift or scale - run.command('transformcalc ' + ' '.join(allindir('linear_transforms_%i' % level)) + ' average linear_transform_average.txt -force -quiet') - if linear_type[level] == 'rigid': - run.command('transformcalc linear_transform_average.txt rigid linear_transform_average.txt -force -quiet') - run.command('transformcalc linear_transform_average.txt invert linear_transform_average_inv.txt -force -quiet') - - average_inv = run.function(loadtxt, 'linear_transform_average_inv.txt') - if average_inv is not None: - for i in inputs: - transform = dot(loadtxt(os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix)), average_inv) - savetxt(os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix), transform) - - progress = app.progressBar('Transforming all subjects to revised template', len(inputs)) - for i in inputs: - run.command('mrtransform ' + abspath(i.directory, i.filename) + ' ' + \ - '-template ' + current_template + ' ' + \ - '-linear ' + os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix) + ' ' + \ - os.path.join('inputs_transformed', '%s.mif' % i.prefix) + \ - datatype_option + ' ' + \ - '-force') - progress.increment() - progress.done() + if use_masks: + progress = app.ProgressBar('Reslicing input masks to average header', len(ins)) + for inp in ins: + run.command('mrtransform ' + inp.ims_path[cid] + ' ' + inp.msk_transformed + ' ' + + '-interp nearest -template ' + cns.templates[0] + ' ' + + datatype_option) + progress.increment() + progress.done() - run.command('mrmath ' + ' '.join(allindir('inputs_transformed')) + ' mean linear_template' + str(level) + '.mif -force') - current_template = 'linear_template' + str(level) + '.mif' - - for entry in os.listdir('linear_transforms_%i' % level): - run.function(copy, os.path.join('linear_transforms_%i' % level, entry), os.path.join('linear_transforms', entry)) - -# Create a template mask for nl registration by taking the intersection of all transformed input masks and dilating -if useMasks and (dononlinear or app.args.template_mask): - progress = app.progressBar('Generating template mask for non-linear registration', len(inputs)) - for i in inputs: - run.command('mrtransform ' + abspath(i.mask_directory, i.mask_filename) + ' ' + \ - '-template ' + current_template + ' ' + \ - '-interp nearest ' + \ - '-linear ' + os.path.join('linear_transforms', '%s.txt' % i.prefix) + ' ' + \ - os.path.join('masks_transformed', '%s.mif' % i.prefix) + ' ' + \ - '-force') - progress.increment() - progress.done() - run.command ('mrmath ' + ' '.join(allindir('masks_transformed')) + ' min - | maskfilter - median - | maskfilter - dilate -npass 5 init_nl_template_mask.mif -force') - current_template_mask = 'init_nl_template_mask.mif' - -if dononlinear: - # Optimise the template with non-linear registration - file.makeDir('warps') - for level, (scale, niter, lmax) in enumerate(zip(nl_scales, nl_niter, nl_lmax)): - progress = app.progressBar('Optimising template with non-linear registration (stage {0} of {1})'.format(level+1, len(nl_scales)), len(inputs)) - for i in inputs: - if level > 0: - initialise_option = ' -nl_init ' + os.path.join('warps_%i' % (level-1), '%s.mif' % i.prefix) - scale_option = '' - else: - scale_option = ' -nl_scale ' + str(scale) - if not doaffine: # rigid or no previous linear stage - initialise_option = ' -rigid_init_matrix ' + os.path.join('linear_transforms', '%s.txt' % i.prefix) - else: - initialise_option = ' -affine_init_matrix ' + os.path.join('linear_transforms', '%s.txt' % i.prefix) + 
if nanmask_input: + inplace_nan_mask([inp.ims_transformed[cid] for inp in ins for cid in range(n_contrasts)], + [inp.msk_transformed for inp in ins for cid in range(n_contrasts)]) - if useMasks: - mask_option = ' -mask1 ' + abspath(i.mask_directory, i.mask_filename) + ' -mask2 ' + current_template_mask - else: - mask_option = '' + if leave_one_out: + calculate_isfinite(ins, cns) - if do_fod_registration: - lmax_option = ' -nl_lmax ' + str(lmax) - else: - lmax_option = ' -noreorientation' + if not dolinear: + for inp in ins: + with open(os.path.join('linear_transforms_initial', inp.uid + '.txt'), 'w') as fout: + fout.write('1 0 0 0\n0 1 0 0\n0 0 1 0\n0 0 0 1\n') - run.command('mrregister ' + abspath(i.directory, i.filename) + ' ' + current_template + ' ' + \ - '-type nonlinear ' + \ - '-nl_niter ' + str(niter) + ' ' + \ - '-nl_warp_full ' + os.path.join('warps_%i' % level, '%s.mif' % i.prefix) + ' ' + \ - '-transformed ' + os.path.join('inputs_transformed', '%s.mif' % i.prefix) + ' ' + \ - '-nl_update_smooth ' + app.args.nl_update_smooth + ' ' + \ - '-nl_disp_smooth ' + app.args.nl_disp_smooth + ' ' + \ - '-nl_grad_step ' + app.args.nl_grad_step + ' ' + \ - '-force' + \ - initialise_option + \ - scale_option + \ - mask_option + \ - datatype_option + \ - lmax_option) - - if level > 0: - run.function(remove, os.path.join('warps_%i'%(level-1), '%s.mif' % i.prefix)) - if useMasks: - run.command('mrtransform ' + abspath(i.mask_directory, i.mask_filename) + ' ' + \ - '-template ' + current_template + ' ' + \ - '-warp_full ' + os.path.join('warps_%i' % level, '%s.mif' % i.prefix) + ' ' + \ - os.path.join('masks_transformed', i.prefix + '.mif') + ' ' + \ - '-interp nearest ' + - '-force') + run.function(copy, 'average_header' + cns.suff[0] + '.mif', 'average_header.mif') + + else: + progress = app.ProgressBar('Performing initial rigid registration to template', len(ins)) + mask_option = '' + cid = 0 + lmax_option = ' -rigid_lmax 0 ' if cns.fod_reorientation[cid] else ' -noreorientation ' + contrast_weight_option = cns.initial_alignment_weight_option + for inp in ins: + output_option = ' -rigid ' + os.path.join('linear_transforms_initial', inp.uid + '.txt') + images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, cns.templates)]) + if use_masks: + mask_option = ' -mask1 ' + inp.msk_path + if initial_alignment == 'robust_mass': + if not os.path.isfile('robust/template.mif'): + if cns.n_volumes[cid] > 0: + run.command('mrconvert ' + cns.templates[cid] + ' -coord 3 0 - | mrconvert - -axes 0,1,2 robust/template.mif') + else: + run.command('mrconvert ' + cns.templates[cid] + ' robust/template.mif') + if n_contrasts > 1: + cmd = ['mrcalc', inp.ims_path[cid], cns.mc_weight_initial_alignment[cid], '-mult'] + for cid in range(1, n_contrasts): + cmd += [inp.ims_path[cid], cns.mc_weight_initial_alignment[cid], '-mult', '-add'] + contrast_weight_option = '' + run.command(' '.join(cmd) + + ' - | mrfilter - zclean -zlower 3 -zupper 3 robust/image_' + inp.uid + '.mif' + ' -maskin ' + inp.msk_path + ' -maskout robust/mask_' + inp.uid + '.mif') + else: + run.command('mrfilter ' + inp.ims_path[0] + ' zclean -zlower 3 -zupper 3 robust/image_' + inp.uid + '.mif' + + ' -maskin ' + inp.msk_path + ' -maskout robust/mask_' + inp.uid + '.mif') + images = 'robust/image_' + inp.uid + '.mif robust/template.mif' + mask_option = ' -mask1 ' + 'robust/mask_' + inp.uid + '.mif' + lmax_option = '' + + run.command('mrregister ' + images + + mask_option + + ' -rigid_scale 1 ' + + ' -rigid_niter 0 ' + + ' -type rigid ' + + 
lmax_option + + contrast_weight_option + + ' -rigid_init_translation ' + initial_alignment.replace('robust_', '') + ' ' + + datatype_option + + output_option) + # translate input images to centre of mass without interpolation + for cid in range(n_contrasts): + run.command('mrtransform ' + inp.ims_path[cid] + c_mrtransform_reorientation[cid] + + ' -linear ' + os.path.join('linear_transforms_initial', inp.uid + '.txt') + + ' ' + inp.ims_transformed[cid] + "_translated.mif" + datatype_option) + if use_masks: + run.command('mrtransform ' + inp.msk_path + + ' -linear ' + os.path.join('linear_transforms_initial', inp.uid + '.txt') + + ' ' + inp.msk_transformed + "_translated.mif" + + datatype_option) progress.increment() + # update average space of first contrast to new extent, delete other average space images + run.command('mraverageheader ' + ' '.join([inp.ims_transformed[cid] + '_translated.mif' for inp in ins]) + ' average_header_tight.mif') progress.done() - run.command('mrmath ' + ' '.join(allindir('inputs_transformed')) + ' mean nl_template' + str(level) + '.mif') - current_template = 'nl_template' + str(level) + '.mif' + if voxel_size is None: + run.command('mrgrid average_header_tight.mif pad -uniform 10 average_header.mif', force=True) + else: + run.command('mrgrid average_header_tight.mif pad -uniform 10 - | ' + 'mrgrid - regrid -voxel ' + ','.join(map(str, voxel_size)) + ' average_header.mif', force=True) + run.function(os.remove, 'average_header_tight.mif') + for cid in range(1, n_contrasts): + run.function(os.remove, 'average_header' + cns.suff[cid] + '.mif') + + if use_masks: + # reslice masks + progress = app.ProgressBar('Reslicing input masks to average header', len(ins)) + for inp in ins: + run.command('mrtransform ' + inp.msk_transformed + '_translated.mif' + ' ' + inp.msk_transformed + ' ' + + '-interp nearest -template average_header.mif' + datatype_option) + progress.increment() + progress.done() + # crop average space to extent defined by translated masks + run.command(['mrmath', [inp.msk_transformed for inp in ins], 'max', 'mask_translated.mif']) + run.command('mrgrid average_header.mif crop -mask mask_translated.mif average_header_cropped.mif') + # pad average space to allow for deviation from initial alignment + run.command('mrgrid average_header_cropped.mif pad -uniform 10 average_header.mif', force=True) + run.function(os.remove, 'average_header_cropped.mif') + # reslice masks + progress = app.ProgressBar('Reslicing masks to new padded average header', len(ins)) + for inp in ins: + run.command('mrtransform ' + inp.msk_transformed + '_translated.mif ' + inp.msk_transformed + ' ' + + '-interp nearest -template average_header.mif' + datatype_option, force=True) + run.function(os.remove, inp.msk_transformed + '_translated.mif') + progress.increment() + progress.done() + run.function(os.remove, 'mask_translated.mif') + + # reslice images + progress = app.ProgressBar('Reslicing input images to average header', len(ins) * n_contrasts) + for cid in range(n_contrasts): + for inp in ins: + run.command('mrtransform ' + c_mrtransform_reorientation[cid] + inp.ims_transformed[cid] + '_translated.mif ' + + inp.ims_transformed[cid] + ' ' + + ' -interp linear -template average_header.mif' + + outofbounds_option + + datatype_option) + run.function(os.remove, inp.ims_transformed[cid] + '_translated.mif') + progress.increment() + progress.done() - if useMasks: - run.command('mrmath ' + ' '.join(allindir('masks_transformed')) + ' min - | maskfilter - median - | maskfilter - dilate -npass 
5 nl_template_mask' + str(level) + '.mif') - current_template_mask = 'nl_template_mask' + str(level) + '.mif' + if nanmask_input: + inplace_nan_mask([inp.ims_transformed[cid] for inp in ins for cid in range(n_contrasts)], + [inp.msk_transformed for inp in ins for cid in range(n_contrasts)]) - if level < len(nl_scales) - 1: - if scale < nl_scales[level + 1]: - upsample_factor = nl_scales[level + 1] / scale - for i in inputs: - run.command('mrresize ' + os.path.join('warps_%i' % level, '%s.mif' % i.prefix) + ' -scale %f tmp.mif' % upsample_factor) - run.function(move, 'tmp.mif', os.path.join('warps_' + str(level), '%s.mif' % i.prefix)) - else: - for i in inputs: - run.function(move, os.path.join('warps_' + str(level), '%s.mif' % i.prefix), 'warps') + if leave_one_out: + calculate_isfinite(ins, cns) + cns.templates = ['initial_template' + contrast + '.mif' for contrast in cns.suff] + for cid in range(n_contrasts): + aggregate(ins, 'initial_template' + cns.suff[cid] + '.mif', cid, agg_measure) + if cns.n_volumes[cid] == 1: + run.function(shutil.move, 'initial_template' + cns.suff[cid] + '.mif', 'tmp.mif') + run.command('mrconvert tmp.mif initial_template' + cns.suff[cid] + '.mif -axes 0,1,2,-1') + + # Optimise template with linear registration + if not dolinear: + for inp in ins: + run.function(copy, os.path.join('linear_transforms_initial', inp.uid+'.txt'), + os.path.join('linear_transforms', inp.uid+'.txt')) + else: + level = 0 + regtype = linear_type[0] + def linear_msg(): + return 'Optimising template with linear registration (stage {0} of {1}; {2})'.format(level + 1, len(linear_scales), regtype) + progress = app.ProgressBar(linear_msg, len(linear_scales) * len(ins) * (1 + n_contrasts + int(use_masks))) + for level, (regtype, scale, niter, lmax) in enumerate(zip(linear_type, linear_scales, linear_niter, linear_lmax)): + for inp in ins: + initialise_option = '' + if use_masks: + mask_option = ' -mask1 ' + inp.msk_path + else: + mask_option = '' + lmax_option = ' -noreorientation' + metric_option = '' + mrregister_log_option = '' + if regtype == 'rigid': + scale_option = ' -rigid_scale ' + str(scale) + niter_option = ' -rigid_niter ' + str(niter) + regtype_option = ' -type rigid' + output_option = ' -rigid ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') + contrast_weight_option = cns.rigid_weight_option + if level > 0: + initialise_option = ' -rigid_init_matrix ' + os.path.join('linear_transforms_%02i' % (level - 1), inp.uid + '.txt') + if do_fod_registration: + lmax_option = ' -rigid_lmax ' + str(lmax) + if linear_estimator: + metric_option = ' -rigid_metric.diff.estimator ' + linear_estimator + if app.VERBOSITY >= 2: + mrregister_log_option = ' -info -rigid_log ' + os.path.join('log', inp.uid + contrast[cid] + "_" + str(level) + '.log') + else: + scale_option = ' -affine_scale ' + str(scale) + niter_option = ' -affine_niter ' + str(niter) + regtype_option = ' -type affine' + output_option = ' -affine ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') + contrast_weight_option = cns.affine_weight_option + if level > 0: + initialise_option = ' -affine_init_matrix ' + os.path.join('linear_transforms_%02i' % (level - 1), inp.uid + '.txt') + if do_fod_registration: + lmax_option = ' -affine_lmax ' + str(lmax) + if linear_estimator: + metric_option = ' -affine_metric.diff.estimator ' + linear_estimator + if write_log: + mrregister_log_option = ' -info -affine_log ' + os.path.join('log', inp.uid + contrast[cid] + "_" + str(level) + '.log') + + if 
leave_one_out:
+          tmpl = []
+          for cid in range(n_contrasts):
+            isfinite = 'isfinite%s/%s.mif' % (cns.suff[cid], inp.uid)
+            weight = inp.aggregation_weight if inp.aggregation_weight is not None else '1'
+            # loo = (template * weighted sum - weight * this) / (weighted sum - weight)
+            run.command('mrcalc ' + cns.isfinite_count[cid] + ' ' + isfinite + ' -sub - | mrcalc ' + cns.templates[cid] +
+                        ' ' + cns.isfinite_count[cid] + ' -mult ' + inp.ims_transformed[cid] + ' ' + weight + ' -mult ' +
+                        ' -sub - -div loo_%s' % cns.templates[cid], force=True)
+            tmpl.append('loo_%s' % cns.templates[cid])
+          images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, tmpl)])
+        else:
+          images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, cns.templates)])
+        command = 'mrregister ' + images + \
+                  initialise_option + \
+                  mask_option + \
+                  scale_option + \
+                  niter_option + \
+                  lmax_option + \
+                  regtype_option + \
+                  metric_option + \
+                  datatype_option + \
+                  contrast_weight_option + \
+                  output_option + \
+                  mrregister_log_option
+        run.command(command, force=True)
+        check_linear_transformation(os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt'), command,
+                                    pause_on_warn=do_pause_on_warn)
+        if leave_one_out:
+          for im_temp in tmpl:
+            run.function(os.remove, im_temp)
+        progress.increment()
+
+      # Here we ensure the template doesn't drift or scale
+      # TODO matrix average might produce a large FOV for large rotations # pylint: disable=fixme
+      run.command('transformcalc ' + ' '.join(path.all_in_dir('linear_transforms_%02i' % level)) +
+                  ' average linear_transform_average.txt -quiet', force=True)
+      if linear_type[level] == 'rigid':
+        run.command('transformcalc linear_transform_average.txt rigid linear_transform_average.txt -quiet', force=True)
+      run.command('transformcalc linear_transform_average.txt invert linear_transform_average_inv.txt -quiet', force=True)
+
+      average_inv = matrix.load_transform('linear_transform_average_inv.txt')
+      if average_inv is not None:
+        for inp in ins:
+          transform = matrix.dot(matrix.load_transform(os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt')), average_inv)
+          matrix.save_transform(os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt'), transform, force=True)
+
+      for cid in range(n_contrasts):
+        for inp in ins:
+          run.command('mrtransform ' + c_mrtransform_reorientation[cid] + inp.ims_path[cid] +
+                      ' -template ' + cns.templates[cid] +
+                      ' -linear ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') +
+                      ' ' + inp.ims_transformed[cid] +
+                      outofbounds_option +
+                      datatype_option,
+                      force=True)
+          progress.increment()
+
+      if use_masks:
+        for inp in ins:
+          run.command('mrtransform ' + inp.msk_path +
+                      ' -template ' + cns.templates[0] +
+                      ' -interp nearest' +
+                      ' -linear ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') +
+                      ' ' + inp.msk_transformed,
+                      force=True)
+          progress.increment()
+
+      if nanmask_input:
+        inplace_nan_mask([inp.ims_transformed[cid] for inp in ins for cid in range(n_contrasts)],
+                         [inp.msk_transformed for inp in ins for cid in range(n_contrasts)])
+
+      if leave_one_out:
+        calculate_isfinite(ins, cns)
+
+      for cid in range(n_contrasts):
+        cns.templates[cid] = 'linear_template%02i%s.mif' % (level, cns.suff[cid])
+        aggregate(ins, cns.templates[cid], cid, agg_measure)
+        if cns.n_volumes[cid] == 1:
+          run.function(shutil.move, cns.templates[cid], 'tmp.mif')
+          run.command('mrconvert tmp.mif ' + cns.templates[cid] + ' -axes 0,1,2,-1')
+          run.function(os.remove, 'tmp.mif')
+
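The transformcalc average / invert steps above are what keep the evolving template from drifting or scaling across iterations: the per-subject linear transforms are averaged, the average is inverted, and that inverse is right-multiplied onto every transform so that their mean stays close to the identity. A rough sketch of the same idea, shown with numpy purely for illustration; the script itself works through the mrtrix3.matrix helpers and transformcalc, whose averaging is more careful than a plain element-wise mean:

  import numpy as np

  def remove_average_drift(transforms):
    # transforms: list of 4x4 affine matrices, one per input subject
    average = sum(transforms) / len(transforms)   # simplistic element-wise average
    average_inv = np.linalg.inv(average)
    # right-multiplying by the inverse average makes the (element-wise) mean of the
    # corrected transforms the identity, so the template frame does not drift
    return [t @ average_inv for t in transforms]

+      for entry in 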
os.listdir('linear_transforms_%02i' % level): + run.function(copy, os.path.join('linear_transforms_%02i' % level, entry), os.path.join('linear_transforms', entry)) + progress.done() -run.command('mrconvert ' + current_template + ' ' + path.fromUser(app.args.template, True) + (' -force' if app.forceOverwrite else '')) + # Create a template mask for nl registration by taking the intersection of all transformed input masks and dilating + if use_masks and (dononlinear or app.ARGS.template_mask): + run.command(['mrmath', path.all_in_dir('mask_transformed')] + + 'min - | maskfilter - median - | maskfilter - dilate -npass 5 init_nl_template_mask.mif'.split(), force=True) + current_template_mask = 'init_nl_template_mask.mif' + + if dononlinear: + path.make_dir('warps') + level = 0 + def nonlinear_msg(): + return 'Optimising template with non-linear registration (stage {0} of {1})'.format(level + 1, len(nl_scales)) + progress = app.ProgressBar(nonlinear_msg, len(nl_scales) * len(ins)) + for level, (scale, niter, lmax) in enumerate(zip(nl_scales, nl_niter, nl_lmax)): + for inp in ins: + if level > 0: + initialise_option = ' -nl_init ' + os.path.join('warps_%02i' % (level - 1), inp.uid + '.mif') + scale_option = '' + else: + scale_option = ' -nl_scale ' + str(scale) + if not doaffine: # rigid or no previous linear stage + initialise_option = ' -rigid_init_matrix ' + os.path.join('linear_transforms', inp.uid + '.txt') + else: + initialise_option = ' -affine_init_matrix ' + os.path.join('linear_transforms', inp.uid + '.txt') + + if use_masks: + mask_option = ' -mask1 ' + inp.msk_path + ' -mask2 ' + current_template_mask + else: + mask_option = '' -if app.args.warp_dir: - warp_path = path.fromUser(app.args.warp_dir, False) - if os.path.exists(warp_path): - run.function(rmtree, warp_path) - run.function(copytree, 'warps', warp_path) + if do_fod_registration: + lmax_option = ' -nl_lmax ' + str(lmax) + else: + lmax_option = ' -noreorientation' -if app.args.linear_transformations_dir: - linear_transformations_path = path.fromUser(app.args.linear_transformations_dir, False) - if os.path.exists(linear_transformations_path): - run.function(rmtree, linear_transformations_path) - run.function(copytree, 'linear_transforms', linear_transformations_path) + contrast_weight_option = cns.nl_weight_option + + if leave_one_out: + tmpl = [] + for cid in range(n_contrasts): + isfinite = 'isfinite%s/%s.mif' % (cns.suff[cid], inp.uid) + weight = inp.aggregation_weight if inp.aggregation_weight is not None else '1' + # loo = (template * weighted sum - weight * this) / (weighted sum - weight) + run.command('mrcalc ' + cns.isfinite_count[cid] + ' ' + isfinite + ' -sub - | mrcalc ' + cns.templates[cid] + + ' ' + cns.isfinite_count[cid] + ' -mult ' + inp.ims_transformed[cid] + ' ' + weight + ' -mult ' + + ' -sub - -div loo_%s' % cns.templates[cid], force=True) + tmpl.append('loo_%s' % cns.templates[cid]) + images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, tmpl)]) + else: + images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, cns.templates)]) + run.command('mrregister ' + images + + ' -type nonlinear' + + ' -nl_niter ' + str(nl_niter[level]) + + ' -nl_warp_full ' + os.path.join('warps_%02i' % level, inp.uid + '.mif') + + ' -transformed ' + + ' -transformed '.join([inp.ims_transformed[cid] for cid in range(n_contrasts)]) + ' ' + + ' -nl_update_smooth ' + app.ARGS.nl_update_smooth + + ' -nl_disp_smooth ' + app.ARGS.nl_disp_smooth + + ' -nl_grad_step ' + app.ARGS.nl_grad_step + + initialise_option + + 
contrast_weight_option + + scale_option + + mask_option + + datatype_option + + outofbounds_option + + lmax_option, + force=True) + + if use_masks: + run.command('mrtransform ' + inp.msk_path + + ' -template ' + cns.templates[0] + + ' -warp_full ' + os.path.join('warps_%02i' % level, inp.uid + '.mif') + + ' ' + inp.msk_transformed + + ' -interp nearest ', + force=True) + + if leave_one_out: + for im_temp in tmpl: + run.function(os.remove, im_temp) -if app.args.transformed_dir: - transformed_path = path.fromUser(app.args.transformed_dir, False) - if os.path.exists(transformed_path): - run.function(rmtree, transformed_path) - run.function(copytree, 'inputs_transformed', transformed_path) + if level > 0: + run.function(os.remove, os.path.join('warps_%02i' % (level - 1), inp.uid + '.mif')) + + progress.increment(nonlinear_msg()) + + if nanmask_input: + inplace_nan_mask([_inp.ims_transformed[cid] for _inp in ins for cid in range(n_contrasts)], + [_inp.msk_transformed for _inp in ins for cid in range(n_contrasts)]) + + if leave_one_out: + calculate_isfinite(ins, cns) + + for cid in range(n_contrasts): + cns.templates[cid] = 'nl_template%02i%s.mif' % (level, cns.suff[cid]) + aggregate(ins, cns.templates[cid], cid, agg_measure) + if cns.n_volumes[cid] == 1: + run.function(shutil.move, cns.templates[cid], 'tmp.mif') + run.command('mrconvert tmp.mif ' + cns.templates[cid] + ' -axes 0,1,2,-1') + run.function(os.remove, 'tmp.mif') + + if use_masks: + run.command(['mrmath', path.all_in_dir('mask_transformed')] + + 'min - | maskfilter - median - | '.split() + + ('maskfilter - dilate -npass 5 nl_template_mask' + str(level) + '.mif').split()) + current_template_mask = 'nl_template_mask' + str(level) + '.mif' + + if level < len(nl_scales) - 1: + if scale < nl_scales[level + 1]: + upsample_factor = nl_scales[level + 1] / scale + for inp in ins: + run.command('mrgrid ' + os.path.join('warps_%02i' % level, inp.uid + '.mif') + + ' regrid -scale %f tmp.mif' % upsample_factor, force=True) + run.function(shutil.move, 'tmp.mif', os.path.join('warps_%02i' % level, inp.uid + '.mif')) + else: + for inp in ins: + run.function(shutil.move, os.path.join('warps_%02i' % level, inp.uid + '.mif'), 'warps') + progress.done() -if app.args.template_mask: - run.command('mrconvert ' + current_template_mask + ' ' + path.fromUser(app.args.template_mask, True) + (' -force' if app.forceOverwrite else '')) + for cid in range(n_contrasts): + run.command('mrconvert ' + cns.templates[cid] + ' ' + cns.templates_out[cid], + mrconvert_keyval='NULL', force=app.FORCE_OVERWRITE) + + if app.ARGS.warp_dir: + warp_path = path.from_user(app.ARGS.warp_dir, False) + if os.path.exists(warp_path): + run.function(shutil.rmtree, warp_path) + os.makedirs(warp_path) + progress = app.ProgressBar('Copying non-linear warps to output directory "' + warp_path + '"', len(ins)) + for inp in ins: + keyval = image.Header(os.path.join('warps', inp.uid + '.mif')).keyval() + keyval = dict((k, keyval[k]) for k in ('linear1', 'linear2')) + json_path = os.path.join('warps', inp.uid + '.json') + with open(json_path, 'w') as json_file: + json.dump(keyval, json_file) + run.command('mrconvert ' + os.path.join('warps', inp.uid + '.mif') + ' ' + + path.quote(os.path.join(warp_path, xcontrast_xsubject_pre_postfix[0] + + inp.uid + xcontrast_xsubject_pre_postfix[1] + '.mif')), + mrconvert_keyval=json_path, force=app.FORCE_OVERWRITE) + progress.increment() + progress.done() -app.complete() + if app.ARGS.linear_transformations_dir: + linear_transformations_path = 
path.from_user(app.ARGS.linear_transformations_dir, False) + if os.path.exists(linear_transformations_path): + run.function(shutil.rmtree, linear_transformations_path) + os.makedirs(linear_transformations_path) + for inp in ins: + trafo = matrix.load_transform(os.path.join('linear_transforms', inp.uid + '.txt')) + matrix.save_transform(os.path.join(linear_transformations_path, + xcontrast_xsubject_pre_postfix[0] + inp.uid + + xcontrast_xsubject_pre_postfix[1] + '.txt'), + trafo, + force=app.FORCE_OVERWRITE) + + if app.ARGS.transformed_dir: + for cid, trdir in enumerate(app.ARGS.transformed_dir): + transformed_path = path.from_user(trdir, False) + if os.path.exists(transformed_path): + run.function(shutil.rmtree, transformed_path) + os.makedirs(transformed_path) + progress = app.ProgressBar('Copying transformed images to output directory "' + transformed_path + '"', len(ins)) + for inp in ins: + run.command(['mrconvert', inp.ims_transformed[cid], os.path.join(transformed_path, inp.ims_filenames[cid])], + mrconvert_keyval=inp.get_ims_path(False)[cid], force=app.FORCE_OVERWRITE) + progress.increment() + progress.done() + + if app.ARGS.template_mask: + run.command('mrconvert ' + current_template_mask + ' ' + path.from_user(app.ARGS.template_mask, True), + mrconvert_keyval='NULL', force=app.FORCE_OVERWRITE) + + +# Execute the script +import mrtrix3 +mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/responsemean b/bin/responsemean new file mode 100755 index 0000000000..96300d4539 --- /dev/null +++ b/bin/responsemean @@ -0,0 +1,85 @@ +#!/usr/bin/env python + +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + + +import math, os, sys + + + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and David Raffelt (david.raffelt@florey.edu.au)') + cmdline.set_synopsis('Calculate the mean response function from a set of text files') + cmdline.add_description('Example usage: ' + os.path.basename(sys.argv[0]) + ' input_response1.txt input_response2.txt input_response3.txt ... output_average_response.txt') + cmdline.add_description('All response function files provided must contain the same number of unique b-values (lines), as well as the same number of coefficients per line.') + cmdline.add_description('As long as the number of unique b-values is identical across all input files, the coefficients will be averaged. This is performed on the assumption that the actual acquired b-values are identical. 
This is however impossible for the ' + os.path.basename(sys.argv[0]) + ' command to determine based on the data provided; it is therefore up to the user to ensure that this requirement is satisfied.')
+  cmdline.add_argument('inputs', help='The input response functions', nargs='+')
+  cmdline.add_argument('output', help='The output mean response function')
+  cmdline.add_argument('-legacy', action='store_true', help='Use the legacy behaviour of former command \'average_response\': average response function coefficients directly, without compensating for global magnitude differences between input files')
+
+
+
+def execute(): #pylint: disable=unused-variable
+  from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel
+  from mrtrix3 import app, matrix #pylint: disable=no-name-in-module, import-outside-toplevel
+
+  app.check_output_path(app.ARGS.output)
+
+  data = [ ] # 3D matrix: Subject, b-value, ZSH coefficient
+  for filepath in app.ARGS.inputs:
+    subject = matrix.load_matrix(filepath)
+    if any(len(line) != len(subject[0]) for line in subject[1:]):
+      raise MRtrixError('File \'' + filepath + '\' does not contain the same number of entries per line (multi-shell response functions must have the same number of coefficients per b-value; pad the data with zeroes if necessary)')
+    if data:
+      if len(subject) != len(data[0]):
+        raise MRtrixError('File \'' + filepath + '\' contains ' + str(len(subject)) + ' b-value' + ('s' if len(subject) > 1 else '') + ' (line' + ('s' if len(subject) > 1 else '') + '); this differs from the first file read (' + sys.argv[1] + '), which contains ' + str(len(data[0])) + ' line' + ('s' if len(data[0]) > 1 else ''))
+      if len(subject[0]) != len(data[0][0]):
+        raise MRtrixError('File \'' + filepath + '\' contains ' + str(len(subject[0])) + ' coefficient' + ('s' if len(subject[0]) > 1 else '') + ' per b-value (line); this differs from the first file read (' + sys.argv[1] + '), which contains ' + str(len(data[0][0])) + ' coefficient' + ('s' if len(data[0][0]) > 1 else '') + ' per line')
+    data.append(subject)
+
+  app.console('Calculating mean RF across ' + str(len(data)) + ' inputs, with ' + str(len(data[0])) + ' b-value' + ('s' if len(data[0])>1 else '') + ' and lmax=' + str(2*(len(data[0][0])-1)))
+
+  # Old approach: Just take the average across all subjects
+  # New approach: Calculate a multiplier to use for each subject, based on the geometric mean
+  #   scaling factor required to bring the subject toward the group mean l=0 terms (across shells)
+
+  mean_lzero_terms = [ sum([ subject[row][0] for subject in data ])/len(data) for row in range(len(data[0])) ]
+  app.debug('Mean l=0 terms: ' + str(mean_lzero_terms))
+
+  weighted_sum_coeffs = [[0.0] * len(data[0][0]) for _ in range(len(data[0]))] #pylint: disable=unused-variable
+  for subject in data:
+    if app.ARGS.legacy:
+      multiplier = 1.0
+    else:
+      subj_lzero_terms = [line[0] for line in subject]
+      log_multiplier = 0.0
+      for subj_lzero, mean_lzero in zip(subj_lzero_terms, mean_lzero_terms):
+        log_multiplier += math.log(mean_lzero / subj_lzero)
+      log_multiplier /= len(data[0])
+      multiplier = math.exp(log_multiplier)
+      app.debug('Subject l=0 terms: ' + str(subj_lzero_terms))
+      app.debug('Resulting multiplier: ' + str(multiplier))
+    weighted_sum_coeffs = [ [ a + multiplier*b for a, b in zip(linea, lineb) ] for linea, lineb in zip(weighted_sum_coeffs, subject) ]
+
+  mean_coeffs = [ [ f/len(data) for f in line ] for line in weighted_sum_coeffs ]
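The multiplier computed above is simply the geometric mean, across b-values, of the ratio between the group-mean l=0 term and the subject's own l=0 term. A minimal self-contained sketch of that calculation, with made-up numbers used purely for illustration:

  import math

  # hypothetical l=0 terms for three b-values (shells)
  mean_lzero = [400.0, 250.0, 150.0]   # group mean, one value per shell
  subj_lzero = [380.0, 262.5, 142.5]   # one subject, same shells

  # geometric mean of the per-shell ratios (group mean / subject)
  multiplier = math.exp(sum(math.log(m / s) for m, s in zip(mean_lzero, subj_lzero)) / len(mean_lzero))
  # every coefficient of this subject is then scaled by 'multiplier' before the final average

+  matrix.save_matrix(app.ARGS.output, mean_coeffs, 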
force=app.FORCE_OVERWRITE) + + + + +# Execute the script +import mrtrix3 +mrtrix3.execute() #pylint: disable=no-member diff --git a/build b/build index 8ca3248b63..7e9f8ee4f3 100755 --- a/build +++ b/build @@ -1,5 +1,22 @@ #!/usr/bin/env python +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +# pylint: disable=redefined-outer-name,invalid-name + usage_string = ''' USAGE @@ -63,6 +80,9 @@ OPTIONS -nowarnings do not print out non-fatal compiler messages (warnings, etc) + -dryrun + do not actually execute the compiler and linking commands (used for testing) + -showdep[=target|all] print the list of dependencies for every target (with -showdep=target; the default) or for all files @@ -74,8 +94,12 @@ OPTIONS keep trying to build regardless of failures, until none of the remaining jobs succeed. + -timings + write compile times out to timings.log file. + -nopaginate do not feed error log to the paginator, even if running in a TTY + ''' @@ -84,15 +108,14 @@ OPTIONS # COMMON DEFINITIONS # ############################################################################ -import platform, sys, os, time, threading, subprocess, copy, codecs, glob, atexit, tempfile, shutil +import atexit, codecs, copy, glob, os, platform, re, shutil, subprocess, sys, tempfile, time, threading +from timeit import default_timer as timer # on Windows, need to use MSYS2 version of python - not MinGW version: if sys.executable[0].isalpha() and sys.executable[1] == ':': - python_cmd = subprocess.check_output ([ 'cygpath.exe', '-w', '/usr/bin/python' ]).splitlines()[0].strip() - sys.exit (subprocess.call ([ python_cmd.decode(errors='ignore') ] + sys.argv)) + python_cmd = subprocess.check_output ([ 'cygpath.exe', '-w', '/usr/bin/python' ]).decode(errors='ignore').splitlines()[0].strip() + sys.exit (subprocess.call ([ python_cmd ] + sys.argv)) -global todo, headers, object_deps, file_flags, lock, print_lock, stop, main_cindex -global include_paths bin_dir = 'bin' cmd_dir = 'cmd' @@ -113,11 +136,11 @@ system = None dependencies = 0 dep_recursive = False verbose = False +dryrun = False persistent = False nowarnings = False paginate = True targets = [] -use_multiple_cores = True todo, headers, object_deps, file_flags = {}, {}, {}, {} @@ -128,6 +151,7 @@ error_stream = None main_cindex = 0 logfile = open ('build.log', 'wb') +timingfile = None bcolors = { @@ -155,15 +179,14 @@ bcolors = { - def colorize(s): - out = [] + out = '' for l in s.splitlines(): - for st in bcolors.keys(): + for st in bcolors: if st in l: l = l.replace (st, bcolors[st] + st) + '\033[0m' break - out.append(l + '\n') + out += l + '\n' return out @@ -171,21 +194,13 @@ def colorize(s): def pipe_errors_to_less_handler(): global error_stream - if len (error_stream): - try: - [ fid, name ] = tempfile.mkstemp() - try: - fid = os.fdopen (fid, 'wb') - [ fid.write (x.encode (errors='ignore')) for x in colorize(error_stream) ] - fid.close() - os.system 
("less -RX " + name) - except Exception as e: - sys.stderr.write (str (e)) - os.unlink (name) - except Exception as e: - sys.stderr.write (str (e)) - except: - raise + if error_stream: + [ fid, name ] = tempfile.mkstemp() + fid = os.fdopen (fid, 'wb') + fid.write (colorize(error_stream).encode (errors='ignore')) + fid.close() + os.system ("less -RX " + name) + os.unlink (name) @@ -196,15 +211,28 @@ def disp (msg): print_lock.acquire() logfile.write (msg.encode (errors='ignore')) sys.stdout.write (msg) + sys.stdout.flush() print_lock.release() + def log (msg): print_lock.acquire() logfile.write (msg.encode (errors='ignore')) if verbose: - sys.stderr.write (msg) + sys.stdout.write (msg) + sys.stdout.flush() + print_lock.release() + + + +def logtime (msg): + print_lock.acquire() + timingfile.write (msg.encode (errors='ignore')) + timingfile.flush() print_lock.release() + + def error (msg): global error_stream print_lock.acquire() @@ -212,35 +240,97 @@ def error (msg): if error_stream is not None: error_stream += msg else: - sys.stderr.write (msg) + sys.stdout.write (msg) + sys.stdout.flush() print_lock.release() +def fail (msg): + print_lock.acquire() + logfile.write (msg.encode (errors='ignore')) + sys.stdout.write (msg) + sys.stdout.flush() + print_lock.release() + sys.exit (1) + + -def split_path (filename): - return filename.replace ('\\', '/').split ('/') +def split_path (path): + return path.replace ('\\', '/').split ('/') def get_real_name (path): - if os.path.islink (path): return os.readlink (path) - else: return (path) + if os.path.islink (path): + return os.readlink (path) + return path +def _get_expected_bin_filenames (exe_suffix, cmd_directory=cmd_dir): + binary_apps = { 'mrtrix3.pyc' } + if exe_suffix: + binary_apps.add('mrtrix.dll') + for entry in os.listdir(cmd_directory): + if entry.endswith(cpp_suffix): + binary_apps.add (entry[:-len(cpp_suffix)] + exe_suffix) + return binary_apps + + +def list_expected_bin_files (exe_suffix, cmd_directory=cmd_dir, bin_directory=bin_dir): + bin_files = [] + binary_apps = _get_expected_bin_filenames(exe_suffix, cmd_directory=cmd_directory) + for app in sorted(binary_apps): + if os.path.isfile(os.path.join(bin_directory, app)): + bin_files.append(os.path.join(bin_directory, app)) + return bin_files +def list_unexpected_bin_files (exe_suffix, cmd_directory=cmd_dir, bin_directory=bin_dir): + unexpected = [] + if os.path.isdir(bin_directory): + binary_apps = _get_expected_bin_filenames(exe_suffix, cmd_directory=cmd_directory) + for filename in sorted(os.listdir(bin_directory)): + filepath = os.path.normpath(os.path.join(bin_directory, filename)) + if os.path.isfile(filepath) and filename not in binary_apps and not is_likely_text(filepath): + unexpected.append(filepath) + return unexpected -def list_untracked_bin_files (folder = '.'): - process = subprocess.Popen ([ 'git', '-C', folder, 'ls-files', '-x', '.*', '-o', bin_dir ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - ( files, stderr ) = process.communicate() + +def list_untracked_bin_files (directory = '.'): + process = subprocess.Popen ([ 'git', '-C', directory, 'ls-files', '-x', '.*', '-o', bin_dir ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + filelist = process.communicate()[0] if process.returncode == 0: - return files.decode(errors='ignore').splitlines() - else: - return [] + return filelist.decode(errors='ignore').splitlines() + return [] +def is_likely_text(filename, nbytes=1024): + """ detects if file has a shebang or is utf-8 encoded, empty file is deemed non-text + :arg 
filename + :arg nbytes sample size to analyse + """ + chunk = b'' + try: + with open(filename, 'rb') as f: + chunk = f.read(nbytes) + except IOError as e: + log('could not read file ' + filename + '\n' + str(e) + '\n') + + if chunk: + if chunk.startswith(b'\x23\x21'): # starts with shebang '#!' + log('is_likely_text: ' + filename + ': starts with shebang \n') + return True + try: + chunk.decode('utf-8') + log('is_likely_text: ' + filename + 'utf-8 encoded \n') + return True + except UnicodeDecodeError: + pass + return False -def modify_path (name, tmp=False, in_main=False, strip=None, add=None): + + +def modify_path (name, tmp=False, strip=None, add=None): if strip is not None: name = name[:-len(strip)] if add is not None: @@ -259,6 +349,7 @@ def modify_path (name, tmp=False, in_main=False, strip=None, add=None): + ############################################################################ # COMMAND-LINE PARSING # ############################################################################ @@ -267,11 +358,13 @@ command_doc = False bash_completion = False for arg in sys.argv[1:]: - if '-help'.startswith (arg): - print (usage_string) - exit (0) + if '-help'.startswith(arg): + sys.stdout.write (usage_string) + sys.exit (0) elif '-verbose'.startswith(arg): verbose = True + elif '-dryrun'.startswith(arg): + dryrun = True elif '-persistent'.startswith(arg): persistent = True elif '-nowarnings'.startswith(arg): @@ -279,18 +372,20 @@ for arg in sys.argv[1:]: elif '-showdep'.startswith(arg): dependencies = 1 elif arg.startswith ('-showdep='): - if arg[9:] == 'target': dependencies = 1 - elif arg[9:] == 'all': dependencies = 2 + if arg[9:] == 'target': + dependencies = 1 + elif arg[9:] == 'all': + dependencies = 2 else: - sys.stderr.write ('invalid specified for option "-showdep" (expected target, all)\n') - sys.exit (1) + fail ('invalid specified for option "-showdep" (expected target, all)') elif '-tree'.startswith(arg): dep_recursive = True + elif '-timings'.startswith(arg): + timingfile = open ('timings.log', 'wb') elif '-nopaginate'.startswith(arg): paginate = False elif arg[0] == '-': - sys.stderr.write ('unknown command-line option "' + arg + '"\n') - sys.exit (1) + fail ('unknown command-line option "' + arg + '"') elif arg == 'bash': bash_completion = True elif arg == 'doc': @@ -303,7 +398,7 @@ for arg in sys.argv[1:]: -if paginate and sys.stderr.isatty(): +if paginate and sys.stdout.isatty(): error_stream = '' atexit.register (pipe_errors_to_less_handler) @@ -336,86 +431,83 @@ while os.path.abspath (os.path.dirname (get_real_name (build_script))) != os.pat # CONFIG HANDLING # ############################################################################ -def get_active_build (folder): - active_config = glob.glob (os.path.join (folder, 'build.*.active')) - if len (active_config) > 1: - sys.stderr.write ('ERROR: more than one config is currently marked as active!\n') - sys.exit (1) - if len (active_config) == 1: - active_config = active_config[0] - if not os.path.isdir (active_config): - raise Exception ('ERROR: active config (' + active_config + ') is not a folder') - if len (os.listdir (active_config)): - raise Exception ('ERROR: active config folder (' + active_config + ') is not empty') - return active_config[:-len('.active')] - else: - os.mkdir (os.path.join (folder, 'build.default.active')) - return os.path.join (folder, 'build.default') +class ConfigException (Exception): + pass + +def get_active_build (directory): + active_configs = glob.glob (os.path.join (directory, 
'build.*.active')) + if len (active_configs) > 1: + fail ('ERROR: more than one config is currently marked as active!') + if active_configs: + name = active_configs[0] + if not os.path.isdir (name): + raise ConfigException ('ERROR: active config (' + name + ') is not a directory') + if os.listdir (name): + raise ConfigException ('ERROR: active config directory (' + name + ') is not empty') + return name[:-len('.active')] + os.mkdir (os.path.join (directory, 'build.default.active')) + return os.path.join (directory, 'build.default') -def store_current_build (folder = '.'): - stored_config = get_active_build (folder) +def store_current_build (directory = '.'): + stored_config = get_active_build (directory) os.rename (stored_config + '.active', stored_config) - print ('in "' + folder + '": storing "' + stored_config + '"...') - for f in [ tmp_dir, 'config' ] + list_untracked_bin_files (folder) + glob.glob ('lib/libmrtrix*'): - if os.path.exists (os.path.join (folder, f)): - os.renames (os.path.join (folder, f), os.path.join (stored_config, f)) + disp ('in "' + directory + '": storing "' + stored_config + '"...\n') + for f in [ tmp_dir, 'config' ] + list_untracked_bin_files (directory) + glob.glob ('lib/libmrtrix*'): + if os.path.exists (os.path.join (directory, f)): + os.renames (os.path.join (directory, f), os.path.join (stored_config, f)) -def restore_build (name, folder = '.'): - stored_config = os.path.join (folder, 'build.' + name) - active_config = stored_config + '.active' - if os.path.isdir (stored_config): - print ('in "' + folder + '": restoring "' + stored_config + '"...') - os.rename (stored_config, active_config) - for root, dirs, files in os.walk(active_config, topdown=False): +def restore_build (config_name, directory = '.'): + stored_path = os.path.join (directory, 'build.' + config_name) + active_path = stored_path + '.active' + if os.path.isdir (stored_path): + disp ('in "' + directory + '": restoring "' + stored_path + '"...\n') + os.rename (stored_path, active_path) + for root, dirs, files in os.walk(active_path, topdown=False): for name in files: - os.renames (os.path.join (root, name), os.path.join (folder, os.path.relpath (root, active_config), name)) + os.renames (os.path.join (root, name), os.path.join (directory, os.path.relpath (root, active_path), name)) for name in dirs: if os.path.isdir (os.path.join (root, name)): os.rmdir (os.path.join(root, name)) else: - if os.path.exists (stored_config): - raise Exception ('ERROR config to be restored (' + stored_config + ') is not a folder') - print ('in "' + folder + '": creating empty "' + stored_config + '"...') - - if not os.path.isdir (active_config): - os.mkdir (active_config) - + if os.path.exists (stored_path): + raise ConfigException ('ERROR config to be restored (' + stored_path + ') is not a directory') + disp ('in "' + directory + '": creating empty "' + stored_path + '"...\n') + if not os.path.isdir (active_path): + os.mkdir (active_path) - -def activate_build (name, folders): - for folder in folders: - active_config = get_active_build (folder) - if os.path.realpath (active_config) == os.path.realpath (os.path.join (folder, 'build.' + name)): +def activate_build (name, directories): + for directory in directories: + config = get_active_build (directory) + if os.path.realpath (config) == os.path.realpath (os.path.join (directory, 'build.' 
+ name)): continue - store_current_build (folder) - restore_build (name, folder) + store_current_build (directory) + restore_build (name, directory) -if len (targets) and targets[0] == 'select': +if targets and targets[0] == 'select': if len(targets) == 1: active_config = os.path.basename (get_active_build ('.')) - print ('current config is "' + active_config + '"') - for folder in mrtrix_dir[1:]: - other_config = os.path.basename (get_active_build (folder)) + disp ('current config is "' + active_config + '"\n') + for entry in mrtrix_dir[1:]: + other_config = os.path.basename (get_active_build (entry)) if active_config != other_config: - print ('WARNING: folder "' + folder + '" contains config "' + other_config + '"') + disp ('WARNING: directory "' + entry + '" contains config "' + other_config + '"\n') sys.exit (0) if len(targets) != 2: - sys.stderr.write ('ERROR: select target expects a single configuration name\n') - sys.exit (1) + fail ('ERROR: select target expects a single configuration name\n') activate_build (targets[1], mrtrix_dir) @@ -431,54 +523,11 @@ active_config = os.path.basename (get_active_build ('.'))[6:] log ('active config is ' + active_config + '\n\n') active_config_core = os.path.basename (get_active_build (mrtrix_dir[-1]))[6:] if active_config_core != active_config: - print ('active config differs from core - switching to core active config') + disp ('active config differs from core - switching to core active config\n') activate_build (active_config_core, mrtrix_dir) - -############################################################################ -# BUILD CLEAN # -############################################################################ - -if 'clean' in targets: - - for f in [ tmp_dir, 'dev' ]: - if not os.path.isdir (f): continue - for root, dirs, files in os.walk(f, topdown=False): - for name in files: - fname = os.path.join(root, name) - print ('delete file:', fname) - os.remove (fname) - for name in dirs: - print ('delete folder:', os.path.join (root, name)) - os.rmdir (os.path.join(root, name)) - print ('delete folder:', root) - os.rmdir (root) - - try: - for fname in list_untracked_bin_files(): - print ('delete file:', fname) - os.remove (fname) - except: - pass - - try: - for fname in glob.glob (os.path.join ('lib', 'libmrtrix*')): - if os.path.isfile (fname): - print ('delete file:', fname) - os.remove (fname) - except: - pass - - sys.exit (0) - - - - - - - ############################################################################ # LOAD CONFIGURATION FILE # ############################################################################ @@ -486,15 +535,26 @@ if 'clean' in targets: if config_file is None: config_file = os.path.normpath (os.path.join (mrtrix_dir[-1], 'config')) +# prevent pylint from generating undefined variable / non-iterable / membership test warnings +PATH = obj_suffix = exe_suffix = lib_prefix = lib_suffix = None +runpath = ld_enabled = moc = rcc = nogui = None +cpp = cpp_flags = ld = ld_flags = ld_lib = ld_lib_flags = eigen_cflags = qt_cflags = qt_ldflags = [ ] + try: log ('reading configuration from "' + config_file + '"...' + os.linesep) - exec (codecs.open (config_file, mode='r', encoding='utf-8').read()) + exec (codecs.open (config_file, mode='r', encoding='utf-8').read()) # pylint: disable=exec-used except IOError: - sys.stderr.write ('''no configuration file found! + fail ('''no configuration file found! 
+please run "./configure" prior to invoking this script + +''') + +# renamed internal string substitutions +if 'LDFLAGS' in ld or 'LDLIB_FLAGS' in ld_lib: + fail ('''configuration file is out of date! please run "./configure" prior to invoking this script ''') - sys.exit (1) if separate_project: cpp_flags += [ '-DMRTRIX_PROJECT' ] @@ -502,7 +562,8 @@ if separate_project: environ = os.environ.copy() environ.update ({ 'PATH': PATH }) -target_bin_dir = os.path.join (mrtrix_dir[0], bin_dir); +target_bin_dir = os.path.join (mrtrix_dir[0], bin_dir) +purged_bin_dir = os.path.join (target_bin_dir, 'purged_files') system = platform.system().lower() if system.startswith('mingw') or system.startswith('msys'): @@ -511,12 +572,73 @@ else: target_lib_dir = os.path.join (mrtrix_dir[-1], 'lib') lib_dir = os.path.join (mrtrix_dir[-1], lib_dir) -if ld_enabled and len(runpath): +if ld_enabled and runpath: ld_flags += [ runpath+os.path.relpath (target_lib_dir,target_bin_dir) ] +############################################################################ +# BUILD CLEAN # +############################################################################ + +if 'clean' in targets: + + for f in [ tmp_dir, 'dev' ]: + if not os.path.isdir (f): + continue + for root, dirs, files in os.walk(f, topdown=False): + for entry in files: + filename = os.path.join(root, entry) + disp ('delete file: ' + filename + '\n') + try: + os.remove (filename) + except OSError as excp: + disp ('error deleting file "' + filename + '": ' + os.strerror (excp.errno)) + for entry in dirs: + dirname = os.path.join(root, entry) + disp ('delete directory: ' + dirname + '\n') + try: + os.rmdir (dirname) + except OSError as excp: + disp ('error deleting folder "' + dirname + '": ' + os.strerror (excp.errno)) + disp ('delete directory: ' + f + '\n') + try: + os.rmdir (f) + except OSError as excp: + disp ('error deleting folder "' + f + '": ' + os.strerror (excp.errno)) + + for filename in list_expected_bin_files(exe_suffix): + disp ('delete file: ' + filename + '\n') + try: + os.remove (filename) + except OSError as excp: + disp ('error deleting file "' + filename + '": ' + os.strerror (excp.errno)) + + for filepath in list_unexpected_bin_files(exe_suffix): + if not os.path.exists(purged_bin_dir): + os.makedirs(purged_bin_dir) + move_to = os.path.join(purged_bin_dir, os.path.split(filepath)[1]) + while os.path.isfile(move_to): + move_to = move_to + '_' + disp ('WARNING: moving unexpected file ' + filepath + ' to ' + move_to + '\n') + try: + shutil.move(filepath, move_to) + except OSError as excp: + disp ('error moving file "' + filepath + '": ' + os.strerror (excp.errno)) + + for filename in glob.glob (os.path.join ('lib', 'libmrtrix*')): + if os.path.isfile (filename): + disp ('delete file: ' + filename + '\n') + try: + os.remove (filename) + except OSError as excp: + disp ('error deleting file "' + filename + '": ' + os.strerror (excp.errno)) + + sys.exit (0) + + + ############################################################################ # GET VERSION INFORMATION # @@ -549,16 +671,33 @@ for entry in glob.glob (os.path.normpath (os.path.join (target_lib_dir, '*' + li disp ('WARNING: removing "' + entry + '" - most likely left over from a previous installation\n') os.remove (entry) +# move unexpected binary files out of the target bin directory: +for filepath in list_unexpected_bin_files(exe_suffix): + if not os.path.exists(purged_bin_dir): + os.makedirs(purged_bin_dir) + move_to = os.path.join(purged_bin_dir, os.path.split(filepath)[1]) + while 
os.path.isfile(move_to): + move_to = move_to + '_' + disp ('WARNING: moving unexpected binary file ' + filepath + ' to ' + move_to + '\n') + try: + shutil.move(filepath, move_to) + except OSError as excp: + disp ('error moving file "' + filepath + '": ' + os.strerror (excp.errno)) + ########################################################################### -# TODO LIST ENTRY # +# TO-DO LIST ENTRY # ########################################################################### -class Entry: +class TargetException (Exception): + pass + +class Entry(object): def __init__ (self, name): global todo name = os.path.normpath (name) - if name in todo.keys(): return + if name in todo: + return todo[name] = self @@ -570,19 +709,24 @@ class Entry: self.dep_timestamp = self.timestamp self.currently_being_processed = False - if is_executable (self.name): self.set_executable() - elif is_icon (self.name): self.set_icon() - elif is_object (self.name): self.set_object() - elif is_library (self.name): self.set_library() - elif is_moc (self.name): self.set_moc() + if is_executable (self.name): + self.set_executable() + elif is_icon (self.name): + self.set_icon() + elif is_object (self.name): + self.set_object() + elif is_library (self.name): + self.set_library() + elif is_moc (self.name): + self.set_moc() elif not os.path.exists (self.name): - raise Exception ('unknown target "' + self.name + '"') + raise TargetException ('unknown target "' + self.name + '"') - [ Entry(item) for item in self.deps ] - dep_timestamp = [ todo[item].timestamp for item in todo.keys() if item in self.deps and not is_library(item) ] - dep_timestamp += [ todo[item].dep_timestamp for item in todo.keys() if item in self.deps and not is_library(item) ] - if len(dep_timestamp): + [ Entry(item) for item in self.deps ] # pylint: disable=expression-not-assigned + dep_timestamp = [ todo[item].timestamp for item in todo if item in self.deps and not is_library(item) ] + dep_timestamp += [ todo[item].dep_timestamp for item in todo if item in self.deps and not is_library(item) ] + if dep_timestamp: self.dep_timestamp = max(dep_timestamp) @@ -591,10 +735,9 @@ class Entry: folder = os.path.dirname (self.name) try: os.makedirs (folder) - except OSError as e: + except OSError as excp: if not os.path.isdir (folder): - error ('ERROR: can''t create target folder "' + folder + '": ' + os.strerror (e.errno)) - exit (1) + fail ('ERROR: can''t create target folder "' + folder + '": ' + os.strerror (excp.errno)) if self.action == 'RCC': with codecs.open (self.cmd[1], mode='w', encoding='utf-8') as fd: @@ -604,16 +747,17 @@ class Entry: if not entry.startswith ('config'): fd.write ('' + entry + '\n') fd.write ('\n\n') - if len(self.cmd) > 0: + if self.cmd: return execute (formatstr.format (cindex, self.action, self.name), self.cmd) - else: - return None + return None def set_executable (self): self.action = 'LB' - if len(exe_suffix) > 0 and self.name.endswith(exe_suffix): cc_file = self.name[:-len(exe_suffix)] - else: cc_file = self.name + if exe_suffix and self.name.endswith(exe_suffix): + cc_file = self.name[:-len(exe_suffix)] + else: + cc_file = self.name cc_file = modify_path (os.path.join (cmd_dir, os.sep.join (split_path(cc_file)[1:])), add=cpp_suffix) self.deps = list_cmd_deps(cc_file) if separate_project: @@ -621,20 +765,23 @@ class Entry: skip = False flags = [] - if 'Q' in file_flags[cc_file]: flags += qt_ldflags + if 'Q' in file_flags[cc_file]: + flags += qt_ldflags if not skip: if not ld_enabled: self.deps = self.deps.union (list_lib_deps()) 
self.cmd = fillin (ld, { - 'LDFLAGS': [ s.replace ('LIBNAME', os.path.basename (self.name)) for s in ld_flags ] + flags, - 'OBJECTS': self.deps, - 'EXECUTABLE': [ self.name ] }) + 'LINKFLAGS': [ s.replace ('LIBNAME', os.path.basename (self.name)) for s in ld_flags ] + flags, + 'OBJECTS': list(self.deps), + 'EXECUTABLE': [ self.name ] }) try: - if ld_use_shell: self.cmd = [ 'sh', '-c', ' '.join(self.cmd) ] - except NameError: pass + if ld_use_shell: + self.cmd = [ 'sh', '-c', ' '.join(self.cmd) ] + except NameError: + pass if ld_enabled: self.deps.add (os.path.normpath (os.path.join (target_lib_dir, libname))) @@ -663,13 +810,14 @@ class Entry: self.deps.add( cc_file ) skip = False - if 'Q' in file_flags[cc_file]: flags += qt_cflags + if 'Q' in file_flags[cc_file]: + flags += qt_cflags if not skip: self.cmd = fillin (cpp, { - 'CFLAGS': cpp_flags + flags, - 'OBJECT': [ self.name ], - 'SRC': [ cc_file ] }) + 'CFLAGS': cpp_flags + flags, + 'OBJECT': [ self.name ], + 'SRC': [ cc_file ] }) def set_moc (self): @@ -684,20 +832,21 @@ class Entry: def set_library (self): if not ld_enabled: - error ('ERROR: shared library generation is disabled in this configuration') - exit (1) + fail ('ERROR: shared library generation is disabled in this configuration') self.action = 'LD' self.deps = list_lib_deps() self.cmd = fillin (ld_lib, { - 'LDLIB_FLAGS': [ s.replace ('LIBNAME', os.path.basename (self.name)) for s in ld_lib_flags ], - 'OBJECTS': [ item for item in self.deps ], - 'LIB': [ self.name ] }) + 'LINKLIB_FLAGS': [ s.replace ('LIBNAME', os.path.basename (self.name)) for s in ld_lib_flags ], + 'OBJECTS': self.deps, + 'LIB': [ self.name ] }) try: - if ld_use_shell: self.cmd = [ 'sh', '-c', ' '.join(self.cmd) ] - except NameError: pass + if ld_use_shell: + self.cmd = [ 'sh', '-c', ' '.join(self.cmd) ] + except NameError: + pass def set_icon (self): @@ -717,21 +866,21 @@ class Entry: def display (self, indent=''): show_rebuild = lambda x: x+' [REBUILD]' if todo[x].need_rebuild() else x - sys.stdout.write (indent + '[' + self.action + '] ' + show_rebuild (self.name) + ':\n') - sys.stdout.write (indent + ' timestamp: ' + str(self.timestamp)) - if len(self.deps): - sys.stdout.write (', dep timestamp: ' + str(self.dep_timestamp) + ', diff: ' + str(self.timestamp-self.dep_timestamp)) - sys.stdout.write ('\n') - if len(self.cmd): - sys.stdout.write (indent + ' command: ' + ' '.join(self.cmd) + '\n') - if len(self.deps): - sys.stdout.write (indent + ' deps: ') + msg = indent + '[' + self.action + '] ' + show_rebuild (self.name) + ':\n' + msg += indent + ' timestamp: ' + str(self.timestamp) + if self.deps: + msg += ', dep timestamp: ' + str(self.dep_timestamp) + ', diff: ' + str(self.timestamp-self.dep_timestamp) + msg += '\n' + if self.cmd: + msg += indent + ' command: ' + ' '.join(self.cmd) + '\n' + if self.deps: + msg += indent + ' deps: ' if dep_recursive: - sys.stdout.write ('\n') + disp (msg + '\n') for x in self.deps: todo[x].display (indent + ' ') else: - sys.stdout.write ((indent+'\n ').join([ show_rebuild(x) for x in self.deps ]) + '\n') + disp (msg + (indent+'\n ').join([ show_rebuild(x) for x in self.deps ]) + '\n') @@ -745,8 +894,7 @@ class Entry: def default_targets(): if not os.path.isdir (cmd_dir): - sys.stderr.write ('ERROR: no "cmd" folder - unable to determine default targets' + os.linesep) - sys.exit (1) + fail ('ERROR: no "cmd" folder - unable to determine default targets' + os.linesep) for entry in os.listdir (cmd_dir): if entry.endswith(cpp_suffix): targets.append (os.path.normpath 
(os.path.join (target_bin_dir, entry[:-len(cpp_suffix)] + exe_suffix))) @@ -768,7 +916,8 @@ def is_icon (target): return os.path.basename (target) == 'icons'+cpp_suffix def mtime (target): - if not os.path.exists (target): return float('inf') + if not os.path.exists (target): + return float('inf') return os.stat(target).st_mtime @@ -788,45 +937,49 @@ def execute (message, cmd, working_dir=None): disp (message + os.linesep) log (' '.join(cmd) + os.linesep) + if dryrun: + return 0 + try: + start = timer() process = subprocess.Popen (cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=environ, cwd=working_dir) ( stdout, stderr ) = process.communicate() + end = timer() + if timingfile is not None: + logtime ('[{:>8.3f}s] '.format(end-start) + message + os.linesep) + if process.returncode != 0: - if error_stream is not None: - disp ('ERROR: ' + message + os.linesep) error ('\nERROR: ' + message + '\n\n' + ' '.join(cmd) + '\n\nfailed with output\n\n' + stderr.decode (errors='ignore')) return 1 errstr = None - if len(stdout) or len(stderr): + if stdout or stderr: errstr = '\n' + ' '.join(cmd) + ':\n\n' - if len(stdout): + if stdout: errstr += stdout.decode (errors='ignore') + '\n\n' - if len(stderr): + if stderr: errstr += stderr.decode (errors='ignore') + '\n\n' if errstr and not nowarnings: - disp (errstr); + disp (errstr) except OSError: - error (cmd[0] + ': command not found') + disp (cmd[0] + ': command not found') return 1 - except: - error ('unexpected exception: ' + str(sys.exc_info())) - raise + return None def print_deps (current_file, indent=''): current_file = os.path.normpath (current_file) - sys.stdout.write (indent + current_file) + msg = indent + current_file if current_file in file_flags: - if len(file_flags[current_file]): - sys.stdout.write (' [' + file_flags[current_file] + ']') - sys.stdout.write (os.linesep) - if len(todo[current_file].deps): - for entry in todo[current_file].deps: - print_deps (entry, indent + ' ') + if file_flags[current_file]: + msg += ' [' + file_flags[current_file] + ']' + msg += os.linesep + disp (msg) + for entry in todo[current_file].deps: + print_deps (entry, indent + ' ') @@ -837,7 +990,7 @@ def is_GUI_target (current_file): if current_file in file_flags: if 'Q' in file_flags[current_file]: return True - if len(todo[current_file].deps): + if todo[current_file].deps: for entry in todo[current_file].deps: if is_GUI_target (entry): return True @@ -861,8 +1014,7 @@ def list_headers (current_file): if not os.path.exists (current_file): if os.path.basename(current_file) == 'icons'+cpp_suffix: return headers[current_file] - sys.stderr.write ('ERROR: cannot find file "' + current_file + '"' + os.linesep) - sys.exit(1) + fail ('ERROR: cannot find file "' + current_file + '"' + os.linesep) with codecs.open (current_file, mode='r', encoding='utf-8') as fd: for line in fd: line = line.strip() @@ -874,11 +1026,11 @@ def list_headers (current_file): if os.path.exists (os.path.join (path, line)): line = os.path.normpath (os.path.join (path, line)) headers[current_file].add (line) - [ headers[current_file].add(entry) for entry in list_headers(line) ] + for entry in list_headers(line): + headers[current_file].add (entry) break else: - sys.stderr.write ('ERROR: cannot find header file \"' + line + '\" (from file \"' + current_file + '\")' + os.linesep) - sys.exit(1) + fail ('ERROR: cannot find header file \"' + line + '\" (from file \"' + current_file + '\")' + os.linesep) elif line == 'Q_OBJECT': if 'M' not in file_flags[current_file]: file_flags[current_file] 
+= 'M' @@ -902,7 +1054,8 @@ def list_cmd_deps (file_cc): if file_cc not in object_deps.keys(): object_deps[file_cc] = set([ modify_path (file_cc, tmp=True, strip=cpp_suffix, add=obj_suffix) ]) for entry in list_headers (file_cc): - if os.path.abspath(entry).startswith(os.path.abspath(lib_dir)): continue + if os.path.abspath(entry).startswith(os.path.abspath(lib_dir)): + continue if 'M' in file_flags[entry]: object_deps[file_cc] = object_deps[file_cc].union ([ modify_path (entry, tmp=True, strip=h_suffix, add=moc_obj_suffix) ]) entry_cc = entry[:-len(h_suffix)] + cpp_suffix @@ -910,7 +1063,8 @@ def list_cmd_deps (file_cc): object_deps[file_cc] = object_deps[file_cc].union (list_cmd_deps(entry_cc)) if os.path.basename (entry) == 'icons'+h_suffix: object_deps[file_cc].add (modify_path (entry, tmp=True, strip=h_suffix, add=obj_suffix)) - if file_cc not in file_flags: file_flags[file_cc] = '' + if file_cc not in file_flags: + file_flags[file_cc] = '' for entry in headers[file_cc]: for c in file_flags[entry]: if c != 'M' and c not in file_flags[file_cc]: @@ -922,17 +1076,17 @@ def list_cmd_deps (file_cc): def list_lib_deps (): deps = set() - for root, dirs, files in os.walk (lib_dir): + for root, dummy_dirs, files in os.walk (lib_dir): for current_file in files: - if current_file[0] == '.': continue + if current_file[0] == '.': + continue if current_file.endswith (cpp_suffix): deps.add (modify_path (os.path.join (root, current_file), tmp=True, strip=cpp_suffix, add=obj_suffix)) + return deps - return (deps) - -def build_next (id): +def build_next (): global todo, lock, stop, main_cindex total_count = len(todo) cindex = 0 @@ -942,27 +1096,30 @@ def build_next (id): while not stop: current = None lock.acquire() - if len(todo): - for item in todo.keys(): - if todo[item].currently_being_processed: continue + if todo: + for item in todo: + if todo[item].currently_being_processed: + continue unsatisfied_deps = set(todo[item].deps).intersection (todo.keys()) - if not len(unsatisfied_deps): + if not unsatisfied_deps: todo[item].currently_being_processed = True current = item main_cindex+=1 cindex = main_cindex break - else: stop = max (stop, 1) + else: + stop = max (stop, 1) lock.release() - if stop: return - if current == None: + if stop: + return + if current is None: time.sleep (0.01) continue target = todo[current] if target.execute(cindex, formatstr): - todo[item].currently_being_processed = False + target.currently_being_processed = False stop = 2 return @@ -970,7 +1127,7 @@ def build_next (id): del todo[current] lock.release() - except: + except Exception: stop = 2 return @@ -978,77 +1135,155 @@ def build_next (id): -def start_html (fid, title, left, up, home, right): - fid.write ('\n\n\n\n') - fid.write ('MRtrix documentation\n\n\n\n\n') - fid.write ('\n\n\n') - fid.write ('\n') - fid.write ('\n') - fid.write ('\n') - fid.write ('\n\n\n') +#def start_html (fid, title, left, up, home, right): +# fid.write ('\n\n\n\n') +# fid.write ('MRtrix documentation\n\n\n\n\n') +# fid.write ('\n\n\n') +# fid.write ('\n') +# fid.write ('\n') +# fid.write ('\n') +# fid.write ('\n\n\n') +def version_from_git (folder): + try: + log ('fetching version information from git in folder "' + folder + '": ') + with open (os.devnull) as devnull: + tag = subprocess.check_output ([ 'git', 'describe', '--abbrev=0' ], cwd=folder, stderr=devnull).decode(errors='ignore').strip() #pylint: disable=unexpected-keyword-arg + commit = subprocess.check_output ([ 'git', 'describe', '--abbrev=8', '--dirty', '--always' ], cwd=folder, 
stderr=devnull).decode(errors='ignore').strip() #pylint: disable=unexpected-keyword-arg + log (tag + ', ' + commit + '\n') + return [ tag, commit ] + except Exception: + log ('not found\n') + raise LookupError + + + +def version_from_file (version_file, version_macro): + log ('fetching version information from "' + version_macro + '" in "' + version_file + '": ') + try: + with open (version_file) as fd: + for line in fd: + line = line.split ("//")[0] + if version_macro in line: + tag = line.split(version_macro)[1].strip().strip('"') + log (tag + '\n') + return tag + if not tag: + raise NameError + except Exception: + log ('not found\n') + raise LookupError + return None + + + + +def mrtrix_version (): + try: + return mrtrix_version.version + except AttributeError: + pass -def get_git_version (folder): log (''' -getting git version in folder "''' + folder + '"... ') +getting MRtrix version from folder "''' + mrtrix_dir[-1] + '"...\n ') + try: + tag = version_from_file (os.path.join (lib_dir, 'version.h'), 'MRTRIX_BASE_VERSION') + except LookupError: + fail ('ERROR: failed to read version information from file "' + os.path.join (lib_dir, 'version.h') + '"\n') try: - process = subprocess.Popen ([ 'git', 'describe', '--abbrev=8', '--dirty', '--always' ], cwd=folder, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - ( git_version, stderr ) = process.communicate() - if process.returncode == 0: - git_version = git_version.decode(errors='ignore').strip() - log (git_version + '\n') - return git_version - except: + log (' ') + version = version_from_git (mrtrix_dir[-1]) + if version[0] != tag: + fail ('ERROR: version stored in "' + os.path.join (lib_dir, 'version.h') + '" does not match git tag name\n') + mrtrix_version.version = version[1] + except LookupError: + mrtrix_version.version = tag + + log ('Using MRtrix version "' + mrtrix_version.version + '"\n') + return mrtrix_version.version + + + + +def project_version (): + try: + return project_version.version + except AttributeError: pass + project_version.version = None - log ('not found\n') - return 'unknown' + log (''' +getting MRtrix project version from current folder...\n ''') + try: + tag = version_from_file (os.path.join (misc_dir, 'project_version.h'), 'MRTRIX_PROJECT_VERSION') + except LookupError: + tag = None + + try: + log (' ') + version = version_from_git ('.') + if tag and version[0] != tag: + fail ('ERROR: project version stored in "' + os.path.join (misc_dir, 'project_version.h') + '" does not match tag name\n') + project_version.version = version[1] + except LookupError: + project_version.version = tag + log ('Using MRtrix project version "' + ( project_version.version or 'unknown' ) + '"\n') + return project_version.version + + + + + +def update_version (version, version_file, template): + version_file_contents = template.replace ('%%%', version or "unknown") -def update_git_version (folder, git_version_file, contents): - git_version = get_git_version (folder) - version_file_contents = contents.replace ('%%%', git_version) - current_version_file_contents = '' try: - with open (git_version_file) as fd: + with open (version_file) as fd: current_version_file_contents = fd.read() - except: + if ( version is None ) or ( version_file_contents == current_version_file_contents ): + log ('version file "' + version_file + '" is up to date\n') + return + except (IOError, OSError): pass - if not current_version_file_contents or (version_file_contents != current_version_file_contents and git_version != 'unknown'): - log ('version file "' + 
git_version_file + '" is out of date - updating\n') - with open (git_version_file, 'w') as fd: - fd.write (version_file_contents) + log ('version file "' + version_file + '" is out of date - updating\n') + with open (version_file, 'w') as fd: + fd.write (version_file_contents) + + + def update_bash_completion (): # Only attempt to generate completion file if POSIX compliant - if os.name != 'posix': return + if os.name != 'posix': + return # Check whether completion generation script exists script_path = os.path.join (mrtrix_dir[-1], 'generate_bash_completion.py') completion_path = os.path.join (mrtrix_dir[-1], 'mrtrix_bash_completion') if not os.path.isfile (script_path): - disp (colorize('WARNING: Skipping bash completion generation. Could not find script at ' + script_path + '\n')[0]) + disp (colorize('WARNING: Skipping bash completion generation. Could not find script at ' + script_path + '\n')) return # Check whether both command files and completion file exist and completion file is newer than commands if not os.path.isdir (target_bin_dir): return - else: - # Only look at relevant executables in bin file - commands = [comm for comm in os.listdir (target_bin_dir) if os.access( os.path.join( target_bin_dir, comm ), os.X_OK)] - if not commands: - return - else: - command_freshness_time = max (commands, key = lambda x: time.ctime ( os.path.getmtime( os.path.join( target_bin_dir, x ) ) ) ) - if os.path.isfile (completion_path) and time.ctime ( os.path.getmtime(completion_path) ) >= command_freshness_time: - return + + # Only look at relevant executables in bin file + commands = [comm for comm in os.listdir (target_bin_dir) if os.access( os.path.join( target_bin_dir, comm ), os.X_OK)] + if not commands: + return + + command_freshness_time = max (commands, key = lambda x: time.ctime ( os.path.getmtime( os.path.join( target_bin_dir, x ) ) ) ) + if os.path.isfile (completion_path) and time.ctime ( os.path.getmtime(completion_path) ) >= command_freshness_time: + return execute ('[SH] ' + completion_path, [ script_path, "-c", completion_path, "-m", target_bin_dir ] ) @@ -1057,29 +1292,28 @@ def update_bash_completion (): def update_user_docs (): # Only attempt to update docs if POSIX compliant - if os.name != 'posix': return + if os.name != 'posix': + return scripts_dir = os.path.abspath(os.path.join (mrtrix_dir[-1], 'docs' )) script_path = os.path.join (scripts_dir, 'generate_user_docs.sh') # Check whether generate docs script exists if not os.path.isfile (script_path): - disp (colorize('WARNING: Skipping user documentation generation. Could not find script at ' + script_path + '\n')[0]) + disp (colorize('WARNING: Skipping user documentation generation. 
Could not find script at ' + script_path + '\n')) return # Check whether commands dir exists if not os.path.isdir (target_bin_dir): return - import re - # Fetch relevant executables in bin file commands = [comm for comm in os.listdir (target_bin_dir) if os.access( os.path.join( target_bin_dir, comm ), os.X_OK) and not re.match (r'^\w+__debug', comm)] # Fetch relevant user docs rst_files = [] - for root, subfolders, files in os.walk (scripts_dir): + for root, dummy_subdirs, files in os.walk (scripts_dir): for rst_file in files: if rst_file.endswith('.rst'): rst_files.append(os.path.join(root, rst_file)) @@ -1102,7 +1336,7 @@ def update_user_docs (): ########################################################################### with open (os.path.join(mrtrix_dir[-1], script_dir, '_version.py'),'w') as vfile: - vfile.write('__version__ = "'+get_git_version (mrtrix_dir[-1])+'"\n') + vfile.write('__version__ = "'+ mrtrix_version() +'" #pylint: disable=unused-variable\n') ########################################################################### @@ -1110,13 +1344,13 @@ with open (os.path.join(mrtrix_dir[-1], script_dir, '_version.py'),'w') as vfile ########################################################################### -if len(targets) == 0: +if not targets: targets = default_targets() # get git version info: -update_git_version (mrtrix_dir[-1], os.path.join (lib_dir, 'version.cpp'), ''' +update_version (mrtrix_version(), os.path.join (lib_dir, 'version.cpp'), ''' namespace MR { namespace App { const char* mrtrix_version = "%%%"; @@ -1125,7 +1359,7 @@ namespace MR { } ''') -update_git_version (mrtrix_dir[-1], os.path.join (mrtrix_dir[-1], misc_dir, 'exec_version.cpp'), ''' +update_version (mrtrix_version(), os.path.join (mrtrix_dir[-1], misc_dir, 'exec_version.cpp'), ''' namespace MR { namespace App { extern const char* executable_uses_mrtrix_version; @@ -1137,8 +1371,7 @@ namespace MR { if separate_project: if not os.path.exists (misc_dir): os.mkdir (misc_dir) - git_version_file = os.path.join (misc_dir, 'project_version.cpp') - update_git_version ('.', git_version_file, ''' + update_version (project_version(), os.path.join (misc_dir, 'project_version.cpp'), ''' namespace MR { namespace App { extern const char* project_version; @@ -1151,12 +1384,6 @@ namespace MR { } ''') - if not os.path.exists (git_version_file): - with open (git_version_file, 'w'): - pass - - - @@ -1165,8 +1392,10 @@ namespace MR { log (''' compiling TODO list... 
''') - -[ Entry(item) for item in targets ] +try: + [ Entry(item) for item in targets ] # pylint: disable=expression-not-assigned +except TargetException as excp: + fail ('ERROR: ' + str(excp) + '\n') @@ -1179,9 +1408,9 @@ if nogui: targets = nogui_targets nogui_todo = {} - for item in todo.keys(): + for item in todo: if not is_GUI_target (todo[item].name): - nogui_todo[item] = todo[item] + nogui_todo[item] = todo[item] todo = nogui_todo @@ -1192,7 +1421,7 @@ log ('building targets: ' + ' '.join (targets) + os.linesep) if dependencies==1: - sys.stderr.write (''' + disp (''' Printing dependencies for targets: ''') @@ -1200,7 +1429,7 @@ Printing dependencies for targets: todo[entry].display() sys.exit (0) elif dependencies==2: - sys.stderr.write (''' + disp (''' Printing dependencies for all files: ''') @@ -1209,7 +1438,7 @@ Printing dependencies for all files: sys.exit (0) todo_tmp = {} -for item in todo.keys(): +for item in todo: if todo[item].action != '--' and todo[item].need_rebuild(): todo_tmp[item] = todo[item] todo = todo_tmp @@ -1219,13 +1448,17 @@ log ('TODO list contains ' + str(len(todo)) + ''' items ''') -#for entry in todo.values(): entry.display() -try: num_processors = int(os.environ['NUMBER_OF_PROCESSORS']) -except: - try: num_processors = os.sysconf('SC_NPROCESSORS_ONLN') - except: num_processors = 1 +#for entry in todo.values(): +# entry.display() +try: + num_processors = int(os.environ['NUMBER_OF_PROCESSORS']) +except (KeyError, TypeError): + try: + num_processors = os.sysconf('SC_NPROCESSORS_ONLN') + except ValueError: + num_processors = 1 -while len(todo): +while todo: stop = False main_cindex = 0 @@ -1238,14 +1471,15 @@ while len(todo): ''') threads = [] - for i in range (1, num_processors): - t = threading.Thread (target=build_next, args=(i,)); + for i in range (1, num_processors): # pylint: disable=unused-variable + t = threading.Thread (target=build_next) t.start() threads.append (t) - build_next(0) + build_next() - for t in threads: t.join() + for t in threads: + t.join() if not persistent: break @@ -1257,7 +1491,7 @@ stopping despite errors as no jobs completed successfully ''') break - if len(todo): + if todo: disp (''' retrying as running in persistent mode @@ -1275,5 +1509,3 @@ if not separate_project and stop <= 1: update_user_docs() sys.exit (stop > 1) - - diff --git a/check_syntax b/check_syntax index 5fd04d5a7e..10d6499e2c 100755 --- a/check_syntax +++ b/check_syntax @@ -1,13 +1,39 @@ #!/bin/bash +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + LOG=syntax.log echo -n "Checking syntax and memory alignment for Eigen 3.3 compatibility... " echo "Checking syntax and memory alignment for Eigen 3.3 compatibility..." > $LOG echo "" >> $LOG +grep=grep +v=`grep --version` +if [[ "$OSTYPE" == darwin* ]] && [[ $v == *"BSD"* ]]; then + if ! 
hash ggrep 2>/dev/null; then + echo 'Aborting, this script requires gnu-grep which you can install for instance via "brew install grep"' + exit 1 + else + grep=ggrep + fi +fi + retval=0 -for f in $(find cmd core src -type f -name '*.h' -o -name '*.cpp' | grep -v '_moc.cpp'); do +for f in $(find cmd core src -type f -name '*.h' -o -name '*.cpp' | $grep -v '_moc.cpp'); do # files to ignore: [[ "$f" -ef "src/gui/shview/icons.h" || @@ -26,7 +52,7 @@ for f in $(find cmd core src -type f -name '*.h' -o -name '*.cpp' | grep -v '_mo # process the file to strip comments, macros, etc: cat $f | \ # remove C preprocessor macros: - grep -v '^#' | \ + $grep -v '^#' | \ # remove C++ single-line comments: perl -pe 's|//.*$||' | \ # remove all newlines to make file one long line: @@ -38,7 +64,6 @@ for f in $(find cmd core src -type f -name '*.h' -o -name '*.cpp' | grep -v '_mo # remove quoted strings: cat .check_syntax2.tmp | perl -pe 's/(")(\\"|.)*?"//g' > .check_syntax.tmp - # detect classes not declared MEMALIGN or NOMEMALIGN: res=$( cat .check_syntax.tmp | \ @@ -49,32 +74,33 @@ for f in $(find cmd core src -type f -name '*.h' -o -name '*.cpp' | grep -v '_mo perl -pe 's|<[^{};<]*?>||g' | \ perl -pe 's|<[^{};<]*?>||g' | \ # match for the parts we're interested in and output just the bits that match: - grep -Eo '(enum\s*)?\b(class|struct)\b[^;{]*?{(\s*(MEMALIGN\s*\([^\)]*\)|NOMEMALIGN))?' | \ + $grep -Eo '(enum\s*)?\b(class|struct)\b[^;{]*?{(\s*(MEMALIGN\s*\([^\)]*\)|NOMEMALIGN))?' | \ # remove matches that correspond to an enum class declaration: - grep -Ev '\benum\s*class\b' | \ + $grep -Ev '\benum\s*class\b' | \ # remove matches that are properly specified: - grep -Ev '\b(class|struct)\b[^;{]*?{(\s*(MEMALIGN\s*\([^\)]*\)|NOMEMALIGN))' + $grep -Ev '\b(class|struct)\b[^;{]*?{(\s*(MEMALIGN\s*\([^\)]*\)|NOMEMALIGN))' ) # detect any instances of std::vector: res="$res"$( cat .check_syntax.tmp | \ # match for the parts we're interested in and output just the bits that match: - grep -Po '(? 
("masks", ""); + const std::string voxels_prefix = get_option_value ("voxels", ""); size_t major_error_count = 0, minor_error_count = 0; for (size_t i = 0; i != argument.size(); ++i) { @@ -64,7 +65,7 @@ void run () Header H_out (in); H_out.ndim() = 3; H_out.datatype() = DataType::Bit; - if (mask_prefix.size()) + if (voxels_prefix.size()) voxels = Image::scratch (H_out, "Scratch image for " + argument[i]); try { @@ -77,11 +78,17 @@ void run () // Manually loop through all voxels, make sure that for every voxel the // sum of tissue partial volumes is either 0 or 1 (or close enough) - size_t voxel_error_sum = 0; + // Also make sure that individual partial volume fractions do not lie + // outside the range [0.0, 1.0] + size_t voxel_error_sum = 0, voxel_error_abs = 0; for (auto outer = Loop(in, 0, 3) (in); outer; ++outer) { default_type sum = 0.0; - for (auto inner = Loop(3) (in); inner; ++inner) + bool abs_error = false; + for (auto inner = Loop(3) (in); inner; ++inner) { sum += in.value(); + if (in.value() < 0.0f || in.value() > 1.0f) + abs_error = true; + } if (!sum) continue; if (abs (sum-1.0) > MAX_ERROR) { ++voxel_error_sum; @@ -90,35 +97,47 @@ void run () voxels.value() = true; } } + if (abs_error) { + ++voxel_error_abs; + if (voxels.valid()) { + assign_pos_of (in, 0, 3).to (voxels); + voxels.value() = true; + } + } } if (voxel_error_sum == 1) { INFO ("Image \"" + argument[i] + "\" contains just one isolated voxel with non-unity sum of partial volume fractions"); } else if (voxel_error_sum) { WARN ("Image \"" + argument[i] + "\" contains " + str(voxel_error_sum) + " brain voxels with non-unity sum of partial volume fractions"); - ++minor_error_count; - if (voxels.valid()) { - std::string path = mask_prefix; - if (argument.size() > 1) { - path += Path::basename (argument[i]); - } else { - bool has_extension = false; - for (auto p = MR::Formats::known_extensions; *p; ++p) { - if (Path::has_suffix (path, std::string (*p))) { - has_extension = true; - break; - } + if (!voxel_error_abs) + ++minor_error_count; + } else if (!voxel_error_abs) { + INFO ("Image \"" + argument[i] + "\" conforms to 5TT format"); + } + + if ((voxel_error_sum || voxel_error_abs) && voxels.valid()) { + std::string path = voxels_prefix; + if (argument.size() > 1) { + path += Path::basename (argument[i]); + } else { + bool has_extension = false; + for (auto p = MR::Formats::known_extensions; *p; ++p) { + if (Path::has_suffix (path, std::string (*p))) { + has_extension = true; + break; } - if (!has_extension) - path += ".mif"; } - auto out = Image::create (path, H_out); - copy (voxels, out); + if (!has_extension) + path += ".mif"; } - } else { - INFO ("Image \"" + argument[i] + "\" conforms to 5TT format"); + auto out = Image::create (path, H_out); + copy (voxels, out); } + if (voxel_error_abs) + throw Exception ("Image \"" + argument[i] + "\" contains " + str(voxel_error_abs) + " brain voxels with a non-physical partial volume fraction"); + } catch (Exception& e) { e.display(); WARN ("Image \"" + argument[i] + "\" does not conform to fundamental 5TT format requirements"); @@ -139,6 +158,8 @@ void run () } else { WARN ("Input image does not perfectly conform to 5TT format, but may still be applicable" + vox_option_suggestion); } + } else { + CONSOLE(std::string(argument.size() > 1 ? 
"All images" : "Input image") + " checked OK"); } } diff --git a/cmd/5ttedit.cpp b/cmd/5ttedit.cpp index 249d2a0685..a339d21db7 100644 --- a/cmd/5ttedit.cpp +++ b/cmd/5ttedit.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "header.h" diff --git a/cmd/afdconnectivity.cpp b/cmd/afdconnectivity.cpp index 47c5ba479d..a3d31bf275 100644 --- a/cmd/afdconnectivity.cpp +++ b/cmd/afdconnectivity.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "memory.h" #include "dwi/fmls.h" diff --git a/cmd/amp2response.cpp b/cmd/amp2response.cpp index 8c0d26d4b8..96facc3a7c 100644 --- a/cmd/amp2response.cpp +++ b/cmd/amp2response.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "command.h" @@ -20,15 +21,14 @@ #include "image.h" #include "image_helpers.h" #include "types.h" - +#include "dwi/gradient.h" +#include "dwi/shells.h" #include "math/constrained_least_squares.h" #include "math/rng.h" #include "math/sphere.h" #include "math/SH.h" #include "math/ZSH.h" -#include "dwi/gradient.h" -#include "dwi/shells.h" @@ -37,15 +37,10 @@ using namespace App; -//#define AMP2RESPONSE_DEBUG -//#define AMP2RESPONSE_PERVOXEL_IMAGES - - - void usage () { - AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)"; + AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au) and J-Donald Tournier (jdtournier@gmail.com)"; SYNOPSIS = "Estimate response function coefficients based on the DWI signal in single-fibre voxels"; @@ -109,14 +104,105 @@ Eigen::Matrix gen_rotation_matrix (const Eigen::Vector3& dir vector all_volumes (const size_t num) { - vector result; - result.reserve (num); + vector result (num); for (size_t i = 0; i != num; ++i) - result.push_back (i); + result[i] = i; return result; } + + +class Accumulator { MEMALIGN(Accumulator) + public: + class Shared { MEMALIGN(Shared) + public: + Shared (int lmax, const vector& volumes, const Eigen::MatrixXd& dirs) : + lmax (lmax), + dirs (dirs), + volumes (volumes), + M (Eigen::MatrixXd::Zero (Math::ZSH::NforL (lmax), Math::ZSH::NforL (lmax))), + b (Eigen::VectorXd::Zero (M.rows())), + count (0) { } + + const int lmax; + const Eigen::MatrixXd& dirs; + const vector& volumes; + Eigen::MatrixXd M; + Eigen::VectorXd b; + size_t count; + }; + + Accumulator (Shared& shared) : + S (shared), + amplitudes (S.volumes.size()), + b (S.b), + M (S.M), + count (0), + rotated_dirs_cartesian (S.dirs.rows(), 3) { } + + ~Accumulator () + { + // accumulate results from all threads: + S.M += M; + S.b += b; + S.count += count; + } + + void operator() (Image& amp_image, Image& dir_image, Image& mask) + { + if (mask.value()) { + ++count; + + // Grab the fibre direction + Eigen::Vector3 fibre_dir; + for (dir_image.index(3) = 0; dir_image.index(3) != 3; ++dir_image.index(3)) + fibre_dir[dir_image.index(3)] = dir_image.value(); + fibre_dir.normalize(); + + // Rotate the directions into a new reference frame, + // where the Z axis is defined by the specified direction + auto R = gen_rotation_matrix (fibre_dir); + rotated_dirs_cartesian.transpose() = R * S.dirs.transpose(); + + // Convert directions from Euclidean space to azimuth/elevation pairs + Eigen::MatrixXd rotated_dirs_azel = Math::Sphere::cartesian2spherical (rotated_dirs_cartesian); + + // Constrain elevations to between 0 and pi/2 + for (ssize_t i = 0; i != rotated_dirs_azel.rows(); ++i) { + if (rotated_dirs_azel (i, 1) > Math::pi_2) { + if (rotated_dirs_azel (i, 0) > Math::pi) + rotated_dirs_azel (i, 0) -= Math::pi; + else + rotated_dirs_azel (i, 0) += Math::pi; + rotated_dirs_azel (i, 1) = Math::pi - rotated_dirs_azel (i, 1); + } + } + + // Generate the ZSH -> amplitude transform + transform = Math::ZSH::init_amp_transform (rotated_dirs_azel.col(1), S.lmax); + + // Grab the image data + for (size_t i = 0; i != 
S.volumes.size(); ++i) { + amp_image.index(3) = S.volumes[i]; + amplitudes[i] = amp_image.value(); + } + + // accumulate results: + b += transform.transpose() * amplitudes; + M.selfadjointView().rankUpdate (transform.transpose()); + } + } + + protected: + Shared& S; + Eigen::VectorXd amplitudes, b; + Eigen::MatrixXd M, transform; + size_t count; + Eigen::Matrix rotated_dirs_cartesian; +}; + + void run () { @@ -148,7 +234,7 @@ void run () dirs_azel.push_back (std::move (directions)); volumes.push_back (all_volumes (dirs_azel.size())); } else { - auto grad = DWI::get_valid_DW_scheme (header); + auto grad = DWI::get_DW_scheme (header); shells.reset (new DWI::Shells (grad)); shells->select_shells (false, false, false); for (size_t i = 0; i != shells->count(); ++i) { @@ -176,6 +262,13 @@ void run () throw Exception ("Values specified for lmax must be even"); max_lmax = std::max (max_lmax, i); } + if ((*shells)[0].is_bzero() && lmax.front()) { + WARN ("Non-zero lmax requested for " + + ((*shells)[0].get_mean() ? + "first shell (mean b=" + str((*shells)[0].get_mean()) + "), which MRtrix3 has classified as b=0;" : + "b=0 shell;")); + WARN (" unless intended, this is likely to fail, as b=0 contains no orientation contrast"); + } } else { // Auto-fill lmax // Because the amp->SH transform doesn't need to be applied per voxel, @@ -209,169 +302,81 @@ void run () if (!num_voxels) throw Exception ("input mask does not contain any voxels"); - Eigen::MatrixXd responses (dirs_azel.size(), Math::ZSH::NforL (max_lmax)); - - for (size_t shell_index = 0; shell_index != dirs_azel.size(); ++shell_index) { + const bool use_ols = get_options ("noconstraint").size(); - std::string shell_desc = (dirs_azel.size() > 1) ? ("_shell" + str(shell_index)) : ""; + CONSOLE (std::string("estimating response function using ") + + ( use_ols ? "ordinary" : "constrained" ) + + " least-squares from " + str(num_voxels) + " voxels"); - Eigen::MatrixXd dirs_cartesian = Math::Sphere::spherical2cartesian (dirs_azel[shell_index]); - // All directions from all SF voxels get concatenated into a single large matrix - Eigen::MatrixXd cat_transforms (num_voxels * dirs_azel[shell_index].rows(), Math::ZSH::NforL (lmax[shell_index])); - Eigen::VectorXd cat_data (num_voxels * dirs_azel[shell_index].rows()); -#ifdef AMP2RESPONSE_DEBUG - // To make sure we've got our data rotated correctly, let's generate a scatterplot of - // elevation vs. 
amplitude - Eigen::MatrixXd scatter; -#endif - size_t voxel_counter = 0; - for (auto l = Loop (mask, 0, 3) (image, mask, dir_image); l; ++l) { - if (mask.value()) { - - // Grab the image data - Eigen::VectorXd data (dirs_azel[shell_index].rows()); - for (size_t i = 0; i != volumes[shell_index].size(); ++i) { - image.index(3) = volumes[shell_index][i]; - data[i] = image.value(); - } - - // Grab the fibre direction - Eigen::Vector3 fibre_dir; - for (dir_image.index(3) = 0; dir_image.index(3) != 3; ++dir_image.index(3)) - fibre_dir[dir_image.index(3)] = dir_image.value(); - fibre_dir.normalize(); - - // Rotate the directions into a new reference frame, - // where the Z axis is defined by the specified direction - Eigen::Matrix R = gen_rotation_matrix (fibre_dir); - Eigen::Matrix rotated_dirs_cartesian (dirs_cartesian.rows(), 3); - Eigen::Vector3 vec (3), rot (3); - for (ssize_t row = 0; row != dirs_azel[shell_index].rows(); ++row) { - vec = dirs_cartesian.row (row); - rot = R * vec; - rotated_dirs_cartesian.row (row) = rot; - } - - // Convert directions from Euclidean space to azimuth/elevation pairs - Eigen::MatrixXd rotated_dirs_azel = Math::Sphere::cartesian2spherical (rotated_dirs_cartesian); - - // Constrain elevations to between 0 and pi/2 - for (ssize_t i = 0; i != rotated_dirs_azel.rows(); ++i) { - if (rotated_dirs_azel (i, 1) > Math::pi_2) { - if (rotated_dirs_azel (i, 0) > Math::pi) - rotated_dirs_azel (i, 0) -= Math::pi; - else - rotated_dirs_azel (i, 0) += Math::pi; - rotated_dirs_azel (i, 1) = Math::pi - rotated_dirs_azel (i, 1); - } - } - -#ifdef AMP2RESPONSE_PERVOXEL_IMAGES - // For the sake of generating a figure, output the original and rotated signals to a dixel ODF image - Header rotated_header (header); - rotated_header.size(0) = rotated_header.size(1) = rotated_header.size(2) = 1; - rotated_header.size(3) = volumes[shell_index].size(); - Header nonrotated_header (rotated_header); - nonrotated_header.size(3) = header.size(3); - Eigen::MatrixXd rotated_grad (volumes[shell_index].size(), 4); - for (size_t i = 0; i != volumes.size(); ++i) { - rotated_grad.block<1,3>(i, 0) = rotated_dirs_cartesian.row(i); - rotated_grad(i, 3) = 1000.0; - } - DWI::set_DW_scheme (rotated_header, rotated_grad); - Image out_rotated = Image::create ("rotated_amps_" + str(sf_counter) + shell_desc + ".mif", rotated_header); - Image out_nonrotated = Image::create ("nonrotated_amps_" + str(sf_counter) + shell_desc + ".mif", nonrotated_header); - out_rotated.index(0) = out_rotated.index(1) = out_rotated.index(2) = 0; - out_nonrotated.index(0) = out_nonrotated.index(1) = out_nonrotated.index(2) = 0; - for (size_t i = 0; i != volumes[shell_index].size(); ++i) { - image.index(3) = volumes[shell_index][i]; - out_rotated.index(3) = i; - out_rotated.value() = image.value(); - } - for (ssize_t i = 0; i != header.size(3); ++i) { - image.index(3) = out_nonrotated.index(3) = i; - out_nonrotated.value() = image.value(); - } -#endif - - // Generate the ZSH -> amplitude transform - Eigen::MatrixXd transform = Math::ZSH::init_amp_transform (rotated_dirs_azel.col(1), lmax[shell_index]); - - // Concatenate these data to the ICLS matrices - cat_transforms.block (voxel_counter * data.size(), 0, transform.rows(), transform.cols()) = transform; - cat_data.segment (voxel_counter * data.size(), data.size()) = data; - -#ifdef AMP2RESPONSE_DEBUG - scatter.conservativeResize (cat_data.size(), 2); - scatter.block (old_rows, 0, data.size(), 1) = rotated_dirs_azel.col(1); - scatter.block (old_rows, 1, data.size(), 1) = data; 
-#endif + Eigen::MatrixXd responses (dirs_azel.size(), Math::ZSH::NforL (max_lmax)); - ++voxel_counter; + for (size_t shell_index = 0; shell_index != dirs_azel.size(); ++shell_index) { + // check the ZSH -> amplitude transform upfront: + { + auto transform = Math::ZSH::init_amp_transform (dirs_azel[shell_index].col(1), lmax[shell_index]); + if (!transform.allFinite()) { + Exception e ("Unable to construct A2SH transformation for shell b=" + str(int(std::round((*shells)[shell_index].get_mean()))) + ";"); + e.push_back (" lmax (" + str(lmax[shell_index]) + ") may be too large for this shell"); + if (!shell_index && (*shells)[0].is_bzero()) + e.push_back (" (this appears to be a b=0 shell, and therefore lmax should be set to 0 for this shell)"); + throw e; } } -#ifdef AMP2RESPONSE_DEBUG - save_matrix (scatter, "scatter" + shell_desc + ".csv"); -#endif - Eigen::VectorXd rf; - shell_desc = (shells && shells->count() > 1) ? ("Shell b=" + str(int(std::round((*shells)[shell_index].get_mean()))) + ": ") : ""; - // Is this anything other than an isotropic response? - if (lmax[shell_index]) { - - if (get_options("noconstraint").size()) { - - // Get an ordinary least squares solution - Eigen::HouseholderQR solver (cat_transforms); - rf = solver.solve (cat_data); + auto dirs_cartesian = Math::Sphere::spherical2cartesian (dirs_azel[shell_index]); - CONSOLE (shell_desc + "Response function [" + str(rf.transpose().cast()) + "] solved via ordinary least-squares from " + str(voxel_counter) + " voxels"); + Accumulator::Shared shared (lmax[shell_index], volumes[shell_index], dirs_cartesian); + ThreadedLoop(image, 0, 3).run (Accumulator (shared), image, dir_image, mask); - } else { - - // Generate the constraint matrix - // We are going to both constrain the amplitudes to be non-negative, and constrain the derivatives to be non-negative - const size_t num_angles_constraint = 90; - Eigen::VectorXd els; - els.resize (num_angles_constraint+1); - for (size_t i = 0; i <= num_angles_constraint; ++i) - els[i] = default_type(i) * Math::pi / 180.0; - Eigen::MatrixXd amp_transform = Math::ZSH::init_amp_transform (els, lmax[shell_index]); - Eigen::MatrixXd deriv_transform = Math::ZSH::init_deriv_transform (els, lmax[shell_index]); - - Eigen::MatrixXd constraints (amp_transform.rows() + deriv_transform.rows(), amp_transform.cols()); - constraints.block (0, 0, amp_transform.rows(), amp_transform.cols()) = amp_transform; - constraints.block (amp_transform.rows(), 0, deriv_transform.rows(), deriv_transform.cols()) = deriv_transform; + Eigen::VectorXd rf; + // Is this anything other than an isotropic response? 
- // Initialise the problem solver - auto problem = Math::ICLS::Problem (cat_transforms, constraints, 1e-10, 1e-10); - auto solver = Math::ICLS::Solver (problem); + if (!lmax[shell_index] || use_ols) { - // Estimate the solution - const size_t niter = solver (rf, cat_data); + rf = shared.M.llt().solve (shared.b); - CONSOLE (shell_desc + "Response function [" + str(rf.transpose().cast()) + " ] solved after " + str(niter) + " constraint iterations from " + str(voxel_counter) + " voxels"); + } else { - } + // Generate the constraint matrix + // We are going to both constrain the amplitudes to be non-negative, and constrain the derivatives to be non-negative + const size_t num_angles_constraint = 90; + Eigen::VectorXd els (num_angles_constraint+1); + for (size_t i = 0; i <= num_angles_constraint; ++i) + els[i] = default_type(i) * Math::pi / 180.0; + auto amp_transform = Math::ZSH::init_amp_transform (els, lmax[shell_index]); + auto deriv_transform = Math::ZSH::init_deriv_transform (els, lmax[shell_index]); - } else { + Eigen::MatrixXd constraints (amp_transform.rows() + deriv_transform.rows(), amp_transform.cols()); + constraints.block (0, 0, amp_transform.rows(), amp_transform.cols()) = amp_transform; + constraints.block (amp_transform.rows(), 0, deriv_transform.rows(), deriv_transform.cols()) = deriv_transform; - // lmax is zero - perform a straight average of the image data - rf.resize(1); - rf[0] = cat_data.mean() * std::sqrt(4*Math::pi); + // Initialise the problem solver + auto problem = Math::ICLS::Problem (shared.M, constraints, Eigen::VectorXd(), 0, 1e-10, 1e-10, 0, 0.0, true); + auto solver = Math::ICLS::Solver (problem); - CONSOLE (shell_desc + "Response function [ " + str(float(rf[0])) + " ] from average of " + str(voxel_counter) + " voxels"); + // Estimate the solution + const size_t niter = solver (rf, shared.b); + INFO ("constrained least-squares solver completed in " + str(niter) + " iterations"); } + CONSOLE (" b=" + str((*shells)[shell_index].get_mean(), 4) + ": [" + str(rf.transpose().cast()) + "]"); + rf.conservativeResizeLike (Eigen::VectorXd::Zero (Math::ZSH::NforL (max_lmax))); responses.row(shell_index) = rf; } - save_matrix (responses, argument[3]); + KeyValues keyvals; + if (shells) { + std::string line = str((*shells)[0].get_mean()); + for (size_t i = 1; i != (*shells).count(); ++i) + line += "," + str((*shells)[i].get_mean()); + keyvals["Shells"] = line; + } + save_matrix (responses, argument[3], keyvals); } diff --git a/cmd/amp2sh.cpp b/cmd/amp2sh.cpp index 5f89dea612..4f3526ed81 100644 --- a/cmd/amp2sh.cpp +++ b/cmd/amp2sh.cpp @@ -1,26 +1,27 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. 
* - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "phase_encoding.h" #include "progressbar.h" -#include "math/SH.h" +#include "algo/threaded_loop.h" #include "dwi/gradient.h" #include "dwi/shells.h" -#include "algo/threaded_loop.h" +#include "math/SH.h" using namespace MR; @@ -38,10 +39,12 @@ void usage () "to the amplitude data." + "The directions can be defined either as a DW gradient scheme (for example to compute " - "the SH representation of the DW signal) or a set of [az el] pairs as output by the dirgen " - "command. The DW gradient scheme or direction set can be supplied within the input " - "image header or using the -gradient or -directions option. Note that if a direction set " - "and DW gradient scheme can be found, the direction set will be used by default." + "the SH representation of the DW signal), a set of [az el] pairs as output by the dirgen " + "command, or a set of [ x y z ] directions in Cartesian coordinates. The DW " + "gradient scheme or direction set can be supplied within the input image " + "header or using the -gradient or -directions option. Note that if a " + "direction set and DW gradient scheme can be found, the direction set " + "will be used by default." + Math::SH::encoding_description; @@ -62,7 +65,7 @@ void usage () + Option ("directions", "the directions corresponding to the input amplitude image used to sample AFD. " "By default this option is not required providing the direction set is supplied " "in the amplitude image. This should be supplied as a list of directions [az el], " - "as generated using the dirgen command") + "as generated using the dirgen command, or as a list of [ x y z ] Cartesian coordinates.") + Argument ("file").type_file_in() + Option ("rician", "correct for Rician noise induced bias, using noise map supplied") @@ -84,10 +87,10 @@ class Amp2SHCommon { MEMALIGN(Amp2SHCommon) public: template Amp2SHCommon (const MatrixType& sh2amp, - const vector& bzeros, - const vector& dwis, + const vector& bzeros, + const vector& dwis, bool normalise_to_bzero) : - sh2amp (sh2amp), + sh2amp (sh2amp), amp2sh (Math::pinv (sh2amp)), bzeros (bzeros), dwis (dwis), @@ -105,14 +108,14 @@ class Amp2SHCommon { MEMALIGN(Amp2SHCommon) class Amp2SH { MEMALIGN(Amp2SH) public: - Amp2SH (const Amp2SHCommon& common) : - C (common), + Amp2SH (const Amp2SHCommon& common) : + C (common), a (common.amp2sh.cols()), s (common.amp2sh.rows()), c (common.amp2sh.rows()) { } template - void operator() (SHImageType& SH, AmpImageType& amp) + void operator() (SHImageType& SH, AmpImageType& amp) { get_amps (amp); c.noalias() = C.amp2sh * a; @@ -123,7 +126,7 @@ class Amp2SH { MEMALIGN(Amp2SH) // Rician-corrected version: template - void operator() (SHImageType& SH, AmpImageType& amp, const NoiseImageType& noise) + void operator() (SHImageType& SH, AmpImageType& amp, const NoiseImageType& noise) { w = Eigen::VectorXd::Ones (C.sh2amp.rows()); @@ -134,7 +137,7 @@ class Amp2SH { MEMALIGN(Amp2SH) sh2amp = C.sh2amp; if (get_rician_bias (sh2amp, noise.value())) break; - for (ssize_t n = 0; n < sh2amp.rows(); ++n) + for (ssize_t n = 0; n < sh2amp.rows(); ++n) sh2amp.row (n).array() *= w[n]; s.noalias() = sh2amp.transpose() * ap; @@ -188,7 +191,7 @@ class Amp2SH { MEMALIGN(Amp2SH) norm_amp += Math::pow2 (a[n]); ap[n] += diff; } - return norm_diff/norm_amp < 1.0e-8; + return norm_diff/norm_amp < 1.0e-8; } }; @@ -206,7 +209,9 @@ void run () auto opt = get_options ("directions"); if 
(opt.size()) { dirs = load_matrix (opt[0][0]); - } + if (dirs.cols() == 3) + dirs = Math::Sphere::cartesian2spherical (dirs); + } else { auto hit = header.keyval().find ("directions"); if (hit != header.keyval().end()) { @@ -222,9 +227,9 @@ void run () } header.keyval()["basis_directions"] = hit->second; header.keyval().erase (hit); - } + } else { - auto grad = DWI::get_valid_DW_scheme (amp); + auto grad = DWI::get_DW_scheme (amp); DWI::Shells shells (grad); shells.select_shells (true, false, false); if (shells.smallest().is_bzero()) diff --git a/cmd/connectome2tck.cpp b/cmd/connectome2tck.cpp index eb1d3b8e02..549472b755 100644 --- a/cmd/connectome2tck.cpp +++ b/cmd/connectome2tck.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include @@ -92,9 +93,56 @@ void usage () "(most typically there will be two entries per streamline, one for each endpoint; but this is not strictly a requirement). " "This file will most typically be generated using the tck2connectome command with the -out_assignments option."; + EXAMPLES + + Example ("Default usage", + "connectome2tck tracks.tck assignments.txt edge-", + "The command will generate one track file for every edge in the connectome, with the name of each file " + "indicating the nodes connected via that edge; for instance, all streamlines connecting nodes " + "23 and 49 will be written to file \"edge-23-49.tck\".") + + + Example ("Extract only the streamlines between nodes 1 and 2", + "connectome2tck tracks.tck assignments.txt tracks_1_2.tck -nodes 1,2 -exclusive -files single", + "Since only a single edge is of interest, this example provides only the two nodes involved " + "in that edge to the -nodes option, adds the -exclusive option so that only streamlines for which " + "both assigned nodes are in the list of nodes of interest are extracted (i.e. only streamlines connecting " + "nodes 1 and 2 in this example), and writes the result to a single output track file.") + + + Example ("Extract the streamlines connecting node 15 to all other nodes in the parcellation, with one track file for each edge", + "connectome2tck tracks.tck assignments.txt from_15_to_ -nodes 15 -keep_self", + "The command will generate the same number of track files as there are nodes in the parcellation: " + "one each for the streamlines connecting node 15 to every other node; i.e. " + "\"from_15_to_1.tck\", \"from_15_to_2.tck\", \"from_15_to_3.tck\", etc.. " + "Because the -keep_self option is specified, file \"from_15_to_15.tck\" will also be " + "generated, containing those streamlines that connect to node 15 at both endpoints.") + + + Example ("For every node, generate a file containing all streamlines connected to that node", + "connectome2tck tracks.tck assignments.txt node -files per_node", + "Here the command will generate one track file for every node in the connectome: " + "\"node1.tck\", \"node2.tck\", \"node3.tck\", etc.. Each of these files will contain " + "all streamlines that connect the node of that index to another node in the connectome " + "(it does not select all tracks connecting a particular node, since the -keep_self " + "option was omitted and therefore e.g. a streamline that is assigned to node 41 will " + "not be present in file \"node41.tck\"). Each streamline in the input tractogram will " + "in fact appear in two different output track files; e.g. a streamline connecting nodes " + "8 and 56 will be present both in file \"node8.tck\" and file \"node56.tck\".") + + + Example ("Get all streamlines that were not successfully assigned to a node pair", + "connectome2tck tracks.tck assignments.txt unassigned.tck -nodes 0 -keep_self -files single", + "Node index 0 corresponds to streamline endpoints that were not successfully assigned to " + "a node. As such, by selecting all streamlines that are assigned to \"node 0\" " + "(including those streamlines for which neither endpoint is assigned to a node due to " + "use of the -keep_self option), the single output track file will contain all streamlines " + "for which at least one of the two endpoints was not successfully assigned to a node.") + + + Example ("Generate a single track file containing edge exemplar trajectories", + "connectome2tck tracks.tck assignments.txt exemplars.tck -files single -exemplars nodes.mif", + "This produces the track file that is required as input when attempting to display " + "connectome edges using the streamlines or streamtubes geometries within the mrview " + "connectome tool."); + ARGUMENTS + Argument ("tracks_in", "the input track file").type_file_in() - + Argument ("assignments_in", "text file containing the node assignments for each streamline").type_file_in() + + Argument ("assignments_in", "input text file containing the node assignments for each streamline").type_file_in() + Argument ("prefix_out", "the output file / prefix").type_text(); @@ -123,6 +171,9 @@ void run () std::string line; ProgressBar progress ("reading streamline assignments file"); while (std::getline (stream, line)) { + line = strip (line.substr (0, line.find_first_of ('#'))); + if (line.empty()) + continue; std::stringstream line_stream (line); vector nodes; while (1) { @@ -238,11 +289,11 @@ void run () std::mutex mutex; ProgressBar progress ("generating exemplars for connectome", count); if (assignments_pairs.size()) { - auto loader = [&] (Tractography::Connectome::Streamline_nodepair& out) { if (!reader (out)) return false; out.set_nodes (assignments_pairs[out.index]); return true; }; + auto loader = [&] (Tractography::Connectome::Streamline_nodepair& out) { if (!reader (out)) return false; out.set_nodes (assignments_pairs[out.get_index()]); return true; }; auto worker = [&] (const Tractography::Connectome::Streamline_nodepair& in) { generator (in); std::lock_guard lock (mutex); ++progress; return true; }; Thread::run_queue (loader, Thread::batch (Tractography::Connectome::Streamline_nodepair()), Thread::multi (worker)); } else { - auto loader = [&]
(Tractography::Connectome::Streamline_nodelist& out) { if (!reader (out)) return false; out.set_nodes (assignments_lists[out.index]); return true; }; + auto loader = [&] (Tractography::Connectome::Streamline_nodelist& out) { if (!reader (out)) return false; out.set_nodes (assignments_lists[out.get_index()]); return true; }; auto worker = [&] (const Tractography::Connectome::Streamline_nodelist& in) { generator (in); std::lock_guard lock (mutex); ++progress; return true; }; Thread::run_queue (loader, Thread::batch (Tractography::Connectome::Streamline_nodelist()), Thread::multi (worker)); } @@ -329,14 +380,14 @@ void run () if (assignments_pairs.size()) { Tractography::Connectome::Streamline_nodepair tck; while (reader (tck)) { - tck.set_nodes (assignments_pairs[tck.index]); + tck.set_nodes (assignments_pairs[tck.get_index()]); writer (tck); ++progress; } } else { Tractography::Connectome::Streamline_nodelist tck; while (reader (tck)) { - tck.set_nodes (assignments_lists[tck.index]); + tck.set_nodes (assignments_lists[tck.get_index()]); writer (tck); ++progress; } diff --git a/cmd/connectomeedit.cpp b/cmd/connectomeedit.cpp new file mode 100644 index 0000000000..d81612995e --- /dev/null +++ b/cmd/connectomeedit.cpp @@ -0,0 +1,71 @@ +/* Copyright (c) 2008-2019 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. + * + * For more details, see http://www.mrtrix.org/. 
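The lines added to connectome2tck's run() above strip anything following a '#' character and skip blank lines before parsing node indices from each row of the assignments file. A self-contained sketch of that per-line handling, using std::istringstream rather than the MRtrix3 strip() helper (the function name below is illustrative only):

  #include <cstdint>
  #include <sstream>
  #include <string>
  #include <vector>

  // Parse one line of an assignments file: drop '#' comments, ignore blanks,
  // read whitespace-separated node indices.
  std::vector<uint32_t> parse_assignment_line (std::string line)
  {
    const auto hash = line.find_first_of ('#');
    if (hash != std::string::npos)
      line.erase (hash);
    std::vector<uint32_t> nodes;
    std::istringstream stream (line);
    uint32_t node;
    while (stream >> node)
      nodes.push_back (node);
    return nodes;   // empty for blank or comment-only lines
  }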
+ */ + +#include "command.h" +#include "connectome/enhance.h" + +using namespace MR; +using namespace MR::Connectome; +using namespace MR::Math; +using namespace App; + + +const char* operations[] = { + "to_symmetric", + "upper_triangular", + "lower_triangular", + "transpose", + "zero_diagonal", + NULL +}; + + +void usage () +{ + AUTHOR = "Matteo Frigo (matteo.frigo@inria.fr)"; + + SYNOPSIS = "Perform basic operations on a connectome"; + + ARGUMENTS + + Argument ("input", "the input connectome.").type_text () + + + Argument ("operation", "the operation to apply, one of: " + join(operations, ", ") + ".").type_choice (operations) + + + Argument ("output", "the output connectome.").type_text(); +} + + + +void run () +{ + MR::Connectome::matrix_type connectome = load_matrix (argument[0]); + MR::Connectome::check(connectome); + const int op = argument[1]; + const std::string& output_path = argument[2]; + + INFO("Applying \'" + str(operations[op]) + "\' transformation to the input connectome."); + + switch (op) { + case 0: MR::Connectome::to_symmetric (connectome); break; + case 1: MR::Connectome::to_upper (connectome); break; + case 2: MR::Connectome::to_upper (connectome); connectome.transposeInPlace(); break; + case 3: connectome.transposeInPlace(); break; + case 4: connectome.matrix().diagonal().setZero(); break; + default: assert (0); + } + + MR::save_matrix(connectome, output_path); +} \ No newline at end of file diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index f55ad99df6..4beeb03d79 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -1,25 +1,28 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
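The new connectomeedit command above dispatches on the chosen operation index in its switch statement. The same transformations can be written directly against a plain Eigen matrix; the sketch below mirrors that switch (note how lower_triangular is implemented as the upper-triangular operation followed by a transpose, exactly as in the command), while the to_symmetric variant shown here simply mirrors the upper triangle onto the lower, which is an assumption rather than a description of what MR::Connectome::to_symmetric actually does:

  #include <Eigen/Dense>

  // Plain-Eigen equivalents of the connectomeedit operations (illustrative only).
  void zero_diagonal (Eigen::MatrixXd& C) { C.diagonal().setZero(); }

  void upper_triangular (Eigen::MatrixXd& C)
  {
    for (Eigen::Index i = 0; i != C.rows(); ++i)
      for (Eigen::Index j = 0; j != i; ++j)
        C (i, j) = 0.0;        // discard the lower triangle
  }

  void lower_triangular (Eigen::MatrixXd& C)
  {
    upper_triangular (C);      // keep the unique (upper) elements ...
    C.transposeInPlace();      // ... then flip them below the diagonal
  }

  void to_symmetric (Eigen::MatrixXd& C)
  {
    // assumption: mirror the upper triangle onto the lower triangle
    for (Eigen::Index i = 0; i != C.rows(); ++i)
      for (Eigen::Index j = i + 1; j != C.cols(); ++j)
        C (j, i) = C (i, j);
  }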
*/ - #include "command.h" #include "progressbar.h" #include "types.h" #include "file/path.h" +#include "math/stats/fwe.h" #include "math/stats/glm.h" -#include "math/stats/permutation.h" +#include "math/stats/import.h" +#include "math/stats/shuffle.h" #include "math/stats/typedefs.h" #include "connectome/enhance.h" @@ -30,9 +33,15 @@ using namespace MR; using namespace App; +using namespace MR::Math::Stats; +using namespace MR::Math::Stats::GLM; + +using Math::Stats::matrix_type; +using Math::Stats::vector_type; +using Stats::PermTest::count_matrix_type; -const char* algorithms[] = { "nbs", "nbse", "none", nullptr }; +const char* algorithms[] = { "nbs", "tfnbs", "none", nullptr }; @@ -41,6 +50,8 @@ const char* algorithms[] = { "nbs", "nbse", "none", nullptr }; #define TFCE_E_DEFAULT 0.4 #define TFCE_H_DEFAULT 3.0 +#define EMPIRICAL_SKEW_DEFAULT 1.0 + void usage () @@ -49,6 +60,19 @@ void usage () SYNOPSIS = "Connectome group-wise statistics at the edge level using non-parametric permutation testing"; + DESCRIPTION + + "For the TFNBS algorithm, default parameters for statistical enhancement " + "have been set based on the work in: \n" + "Vinokur, L.; Zalesky, A.; Raffelt, D.; Smith, R.E. & Connelly, A. A Novel Threshold-Free Network-Based Statistics Method: Demonstration using Simulated Pathology. OHBM, 2015, 4144; \n" + "and: \n" + "Vinokur, L.; Zalesky, A.; Raffelt, D.; Smith, R.E. & Connelly, A. A novel threshold-free network-based statistical method: Demonstration and parameter optimisation using in vivo simulated pathology. In Proc ISMRM, 2015, 2846. \n" + "Note however that not only was the optimisation of these parameters not " + "very precise, but the outcomes of statistical inference (for both this " + "algorithm and the NBS method) can vary markedly for even small changes to " + "enhancement parameters. Therefore the specificity of results obtained using " + "either of these methods should be interpreted with caution." + + Math::Stats::GLM::column_ones_description; + ARGUMENTS + Argument ("input", "a text file listing the file names of the input connectomes").type_file_in () @@ -56,33 +80,34 @@ void usage () + Argument ("algorithm", "the algorithm to use in network-based clustering/enhancement. " "Options are: " + join(algorithms, ", ")).type_choice (algorithms) - + Argument ("design", "the design matrix. Note that a column of 1's will need to be added for correlations.").type_file_in () + + Argument ("design", "the design matrix").type_file_in () - + Argument ("contrast", "the contrast vector, specified as a single row of weights").type_file_in () + + Argument ("contrast", "the contrast matrix").type_file_in () + Argument ("output", "the filename prefix for all output.").type_text(); OPTIONS - + Stats::PermTest::Options (true) + + Math::Stats::shuffle_options (true, EMPIRICAL_SKEW_DEFAULT) // TODO OptionGroup these, and provide a generic loader function + Stats::TFCE::Options (TFCE_DH_DEFAULT, TFCE_E_DEFAULT, TFCE_H_DEFAULT) + + Math::Stats::GLM::glm_options ("edge") + + OptionGroup ("Additional options for connectomestats") + Option ("threshold", "the t-statistic value to use in threshold-based clustering algorithms") + Argument ("value").type_float (0.0); - REFERENCES + "* If using the NBS algorithm: \n" "Zalesky, A.; Fornito, A. & Bullmore, E. T. Network-based statistic: Identifying differences in brain networks. \n" "NeuroImage, 2010, 53, 1197-1207" - + "* If using the NBSE algorithm: \n" - "Vinokur, L.; Zalesky, A.; Raffelt, D.; Smith, R.E. & Connelly, A. 
A Novel Threshold-Free Network-Based Statistics Method: Demonstration using Simulated Pathology. \n" - "OHBM, 2015, 4144" + + "* If using the TFNBS algorithm: \n" + "Baggio, H.C.; Abos, A.; Segura, B.; Campabadal, A.; Garcia-Diaz, A.; Uribe, C.; Compta, Y.; Marti, M.J.; Valldeoriola, F.; Junque, C. Statistical inference in brain graphs using threshold-free network-based statistics." + "HBM, 2018, 39, 2289-2302" + "* If using the -nonstationary option: \n" "Salimi-Khorshidi, G.; Smith, S.M. & Nichols, T.E. Adjusting the effect of nonstationarity in cluster-based and TFCE inference. \n" @@ -92,47 +117,74 @@ void usage () -using Math::Stats::matrix_type; -using Math::Stats::vector_type; - - void load_tfce_parameters (Stats::TFCE::Wrapper& enhancer) { const default_type dH = get_option_value ("tfce_dh", TFCE_DH_DEFAULT); - const default_type E = get_option_value ("tfce_e", TFCE_E_DEFAULT); - const default_type H = get_option_value ("tfce_h", TFCE_H_DEFAULT); + const default_type E = get_option_value ("tfce_e", TFCE_E_DEFAULT); + const default_type H = get_option_value ("tfce_h", TFCE_H_DEFAULT); enhancer.set_tfce_parameters (dH, E, H); } +// Define data importer class that will obtain connectome data for a +// specific subject based on the string path to the image file for +// that subject +class SubjectConnectomeImport : public SubjectDataImportBase +{ MEMALIGN(SubjectConnectomeImport) + public: + SubjectConnectomeImport (const std::string& path) : + SubjectDataImportBase (path) + { + auto M = load_matrix (path); + Connectome::check (M); + if (Connectome::is_directed (M)) + throw Exception ("Connectome from file \"" + Path::basename (path) + "\" is a directed matrix"); + Connectome::to_upper (M); + Connectome::Mat2Vec mat2vec (M.rows()); + mat2vec.M2V (M, data); + } + + void operator() (matrix_type::RowXpr row) const override + { + assert (row.size() == data.size()); + row = data; + } + + default_type operator[] (const size_t index) const override + { + assert (index < size_t(data.size())); + return (data[index]); + } + + size_t size() const override { return data.size(); } + + private: + vector_type data; + +}; + + + void run() { - // Read filenames - vector filenames; - { - std::string folder = Path::dirname (argument[0]); - std::ifstream ifs (argument[0].c_str()); - std::string temp; - while (getline (ifs, temp)) { - std::string filename (Path::join (folder, temp)); - size_t p = filename.find_last_not_of(" \t"); - if (std::string::npos != p) - filename.erase(p+1); - if (filename.size()) { - if (!MR::Path::exists (filename)) - throw Exception ("Input connectome file not found: \"" + filename + "\""); - filenames.push_back (filename); - } - } + // Read file names and check files exist + CohortDataImport importer; + importer.initialise (argument[0]); + CONSOLE ("Number of inputs: " + str(importer.size())); + const size_t num_edges = importer[0]->size(); + + for (size_t i = 1; i < importer.size(); ++i) { + if (importer[i]->size() != importer[0]->size()) + throw Exception ("Size of connectome for subject " + str(i) + " (file \"" + importer[i]->name() + "\" does not match that of first subject"); } - const MR::Connectome::matrix_type example_connectome = load_matrix (filenames.front()); - if (example_connectome.rows() != example_connectome.cols()) - throw Exception ("Connectome of first subject is not square (" + str(example_connectome.rows()) + " x " + str(example_connectome.cols()) + ")"); + // TODO Could determine this from the vector length with the right equation + const 
MR::Connectome::matrix_type example_connectome = load_matrix (importer[0]->name()); const MR::Connectome::node_t num_nodes = example_connectome.rows(); + Connectome::Mat2Vec mat2vec (num_nodes); // Initialise enhancement algorithm std::shared_ptr enhancer; @@ -162,156 +214,164 @@ void run() throw Exception ("Unknown enhancement algorithm"); } - size_t num_perms = get_option_value ("nperms", DEFAULT_NUMBER_PERMUTATIONS); - const bool do_nonstationary_adjustment = get_options ("nonstationary").size(); - size_t nperms_nonstationary = get_option_value ("nperms_nonstationarity", DEFAULT_NUMBER_PERMUTATIONS_NONSTATIONARITY); + const bool do_nonstationarity_adjustment = get_options ("nonstationarity").size(); + const default_type empirical_skew = get_option_value ("skew_nonstationarity", EMPIRICAL_SKEW_DEFAULT); // Load design matrix const matrix_type design = load_matrix (argument[2]); - if (size_t(design.rows()) != filenames.size()) - throw Exception ("number of subjects does not match number of rows in design matrix"); - - // Load permutations file if supplied - auto opt = get_options("permutations"); - vector > permutations; - if (opt.size()) { - permutations = Math::Stats::Permutation::load_permutations_file (opt[0][0]); - num_perms = permutations.size(); - if (permutations[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); + if (size_t(design.rows()) != importer.size()) + throw Exception ("number of subjects (" + str(importer.size()) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); + + // Before validating the contrast matrix, we first need to see if there are any + // additional design matrix columns coming from edge-wise subject data + vector extra_columns; + bool nans_in_columns = false; + auto opt = get_options ("column"); + for (size_t i = 0; i != opt.size(); ++i) { + extra_columns.push_back (CohortDataImport()); + extra_columns[i].initialise (opt[i][0]); + if (!extra_columns[i].allFinite()) + nans_in_columns = true; } - - // Load non-stationary correction permutations file if supplied - opt = get_options("permutations_nonstationary"); - vector > permutations_nonstationary; - if (opt.size()) { - permutations_nonstationary = Math::Stats::Permutation::load_permutations_file (opt[0][0]); - nperms_nonstationary = permutations.size(); - if (permutations_nonstationary[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the nonstationary permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); + const ssize_t num_factors = design.cols() + extra_columns.size(); + CONSOLE ("Number of factors: " + str(num_factors)); + if (extra_columns.size()) { + CONSOLE ("Number of element-wise design matrix columns: " + str(extra_columns.size())); + if (nans_in_columns) + CONSOLE ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from edge-wise design matrices accordingly"); } - - - // Load contrast matrix - matrix_type contrast = load_matrix (argument[3]); - if (contrast.cols() > design.cols()) - throw Exception ("too many contrasts for design matrix"); - contrast.conservativeResize (contrast.rows(), design.cols()); + check_design (design, extra_columns.size()); + + // Load variance groups + auto variance_groups = GLM::load_variance_groups (design.rows()); + const size_t num_vgs = variance_groups.size() ? 
variance_groups.maxCoeff()+1 : 1; + if (num_vgs > 1) + CONSOLE ("Number of variance groups: " + str(num_vgs)); + + // Load hypotheses + const vector hypotheses = Math::Stats::GLM::load_hypotheses (argument[3]); + const size_t num_hypotheses = hypotheses.size(); + if (hypotheses[0].cols() != num_factors) + throw Exception ("the number of columns in the contrast matrix (" + str(hypotheses[0].cols()) + ")" + + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + + (extra_columns.size() ? " (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); + CONSOLE ("Number of hypotheses: " + str(num_hypotheses)); const std::string output_prefix = argument[4]; // Load input data // For compatibility with existing statistics code, symmetric matrix data is adjusted - // into vector form - one row per edge in the symmetric connectome. The Mat2Vec class - // deals with the re-ordering of matrix data into this form. - MR::Connectome::Mat2Vec mat2vec (num_nodes); - const size_t num_edges = mat2vec.vec_size(); - matrix_type data (num_edges, filenames.size()); + // into vector form - one row per edge in the symmetric connectome. This has already + // been performed when the CohortDataImport class is initialised. + matrix_type data (importer.size(), num_edges); { - ProgressBar progress ("Loading input connectome data", filenames.size()); - for (size_t subject = 0; subject < filenames.size(); subject++) { - - const std::string& path (filenames[subject]); - MR::Connectome::matrix_type subject_data; - try { - subject_data = load_matrix (path); - } catch (Exception& e) { - throw Exception (e, "Error loading connectome data for subject #" + str(subject) + " (file \"" + path + "\""); - } - - try { - MR::Connectome::to_upper (subject_data); - if (size_t(subject_data.rows()) != num_nodes) - throw Exception ("Connectome matrix is not the correct size (" + str(subject_data.rows()) + ", should be " + str(num_nodes) + ")"); - } catch (Exception& e) { - throw Exception (e, "Connectome for subject #" + str(subject) + " (file \"" + path + "\") invalid"); - } - - for (size_t i = 0; i != num_edges; ++i) - data(i, subject) = subject_data (mat2vec(i).first, mat2vec(i).second); - + ProgressBar progress ("Agglomerating input connectome data", importer.size()); + for (size_t subject = 0; subject < importer.size(); subject++) { + (*importer[subject]) (data.row (subject)); ++progress; } } + const bool nans_in_data = !data.allFinite(); - { - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation...", contrast.cols() + 3); + // Only add contrast matrix row number to image outputs if there's more than one hypothesis + auto postfix = [&] (const size_t i) { return (num_hypotheses > 1) ?
("_" + hypotheses[i].name()) : ""; }; - const matrix_type betas = Math::Stats::GLM::solve_betas (data, design); - for (size_t i = 0; i < size_t(contrast.cols()); ++i) { - save_matrix (mat2vec.V2M (betas.col(i)), output_prefix + "_beta_" + str(i) + ".csv"); + { + matrix_type betas (num_factors, num_edges); + matrix_type abs_effect_size (num_edges, num_hypotheses); + matrix_type std_effect_size (num_edges, num_hypotheses); + matrix_type stdev (num_vgs, num_edges); + vector_type cond (num_edges); + + Math::Stats::GLM::all_stats (data, design, extra_columns, hypotheses, variance_groups, + cond, betas, abs_effect_size, std_effect_size, stdev); + + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_hypotheses) + num_vgs + (nans_in_data || extra_columns.size() ? 1 : 0)); + for (ssize_t i = 0; i != num_factors; ++i) { + save_matrix (mat2vec.V2M (betas.row(i)), output_prefix + "beta_" + str(i) + ".csv"); ++progress; } - - const matrix_type abs_effects = Math::Stats::GLM::abs_effect_size (data, design, contrast); - save_matrix (mat2vec.V2M (abs_effects.col(0)), output_prefix + "_abs_effect.csv"); - ++progress; - - const matrix_type std_effects = Math::Stats::GLM::std_effect_size (data, design, contrast); - matrix_type first_std_effect = mat2vec.V2M (std_effects.col (0)); - for (MR::Connectome::node_t i = 0; i != num_nodes; ++i) { - for (MR::Connectome::node_t j = 0; j != num_nodes; ++j) { - if (!std::isfinite (first_std_effect (i, j))) - first_std_effect (i, j) = 0.0; + for (size_t i = 0; i != num_hypotheses; ++i) { + if (!hypotheses[i].is_F()) { + save_matrix (mat2vec.V2M (abs_effect_size.col(i)), "abs_effect" + postfix(i) + ".csv"); + ++progress; + if (num_vgs == 1) + save_matrix (mat2vec.V2M (std_effect_size.col(i)), "std_effect" + postfix(i) + ".csv"); + } else { + ++progress; } + ++progress; } - save_matrix (first_std_effect, output_prefix + "_std_effect.csv"); - ++progress; - - const matrix_type stdev = Math::Stats::GLM::stdev (data, design); - save_matrix (mat2vec.V2M(stdev.row(0)), output_prefix + "_std_dev.csv"); - } - - Math::Stats::GLMTTest glm_ttest (data, design, contrast); - - // If performing non-stationarity adjustment we need to pre-compute the empirical statistic - vector_type empirical_statistic; - if (do_nonstationary_adjustment) { - empirical_statistic = vector_type::Zero (num_edges); - if (permutations_nonstationary.size()) { - Stats::PermTest::PermutationStack perm_stack (permutations_nonstationary, "precomputing empirical statistic for non-stationarity adjustment..."); - Stats::PermTest::precompute_empirical_stat (glm_ttest, enhancer, perm_stack, empirical_statistic); + if (nans_in_data || extra_columns.size()) { + save_matrix (mat2vec.V2M (cond), "cond.csv"); + ++progress; + } + if (num_vgs == 1) { + save_matrix (mat2vec.V2M (stdev.row(0)), "std_dev.csv"); } else { - Stats::PermTest::PermutationStack perm_stack (nperms_nonstationary, design.rows(), "precomputing empirical statistic for non-stationarity adjustment...", true); - Stats::PermTest::precompute_empirical_stat (glm_ttest, enhancer, perm_stack, empirical_statistic); + for (size_t i = 0; i != num_vgs; ++i) { + save_matrix (mat2vec.V2M (stdev.row(i)), "std_dev" + str(i) + ".csv"); + ++progress; + } } - save_matrix (mat2vec.V2M (empirical_statistic), output_prefix + "_empirical.csv"); } - // Precompute default statistic and enhanced statistic - vector_type tvalue_output (num_edges); - vector_type enhanced_output (num_edges); + // Construct the class for 
performing the initial statistical tests + std::shared_ptr glm_test; + if (extra_columns.size() || nans_in_data) { + if (variance_groups.size()) + glm_test.reset (new GLM::TestVariableHeteroscedastic (extra_columns, data, design, hypotheses, variance_groups, nans_in_data, nans_in_columns)); + else + glm_test.reset (new GLM::TestVariableHomoscedastic (extra_columns, data, design, hypotheses, nans_in_data, nans_in_columns)); + } else { + if (variance_groups.size()) + glm_test.reset (new GLM::TestFixedHeteroscedastic (data, design, hypotheses, variance_groups)); + else + glm_test.reset (new GLM::TestFixedHomoscedastic (data, design, hypotheses)); + } - Stats::PermTest::precompute_default_permutation (glm_ttest, enhancer, empirical_statistic, enhanced_output, std::shared_ptr(), tvalue_output); + // If performing non-stationarity adjustment we need to pre-compute the empirical statistic + matrix_type empirical_statistic; + if (do_nonstationarity_adjustment) { + empirical_statistic = matrix_type::Zero (num_edges, num_hypotheses); + Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, empirical_skew, empirical_statistic); + for (size_t i = 0; i != num_hypotheses; ++i) + save_matrix (mat2vec.V2M (empirical_statistic.col(i)), output_prefix + "empirical" + postfix(i) + ".csv"); + } - save_matrix (mat2vec.V2M (tvalue_output), output_prefix + "_tvalue.csv"); - save_matrix (mat2vec.V2M (enhanced_output), output_prefix + "_enhanced.csv"); + // Precompute default statistic, Z-transformation of such, and enhanced statistic + matrix_type default_statistic, default_zstat, default_enhanced; + Stats::PermTest::precompute_default_permutation (glm_test, enhancer, empirical_statistic, default_statistic, default_zstat, default_enhanced); + for (size_t i = 0; i != num_hypotheses; ++i) { + save_matrix (mat2vec.V2M (default_statistic.col(i)), output_prefix + (hypotheses[i].is_F() ? 
"F" : "t") + "value" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (default_zstat .col(i)), output_prefix + "Zstat" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (default_enhanced .col(i)), output_prefix + "enhanced" + postfix(i) + ".csv"); + } // Perform permutation testing if (!get_options ("notest").size()) { - // FIXME Getting NANs in the null distribution - // Check: was result of pre-nulled subject data - vector_type null_distribution (num_perms); - vector_type uncorrected_pvalues (num_edges); + const bool fwe_strong = get_option_value ("strong", false); + if (fwe_strong && num_hypotheses == 1) { + WARN("Option -strong has no effect when testing a single hypothesis only"); + } - if (permutations.size()) { - Stats::PermTest::run_permutations (permutations, glm_ttest, enhancer, empirical_statistic, - enhanced_output, std::shared_ptr(), - null_distribution, std::shared_ptr(), - uncorrected_pvalues, std::shared_ptr()); + matrix_type null_distribution, uncorrected_pvalues; + count_matrix_type null_contributions; + Stats::PermTest::run_permutations (glm_test, enhancer, empirical_statistic, default_enhanced, fwe_strong, + null_distribution, null_contributions, uncorrected_pvalues); + if (fwe_strong) { + save_vector (null_distribution.col(0), output_prefix + "null_dist.txt"); } else { - Stats::PermTest::run_permutations (num_perms, glm_ttest, enhancer, empirical_statistic, - enhanced_output, std::shared_ptr(), - null_distribution, std::shared_ptr(), - uncorrected_pvalues, std::shared_ptr()); + for (size_t i = 0; i != num_hypotheses; ++i) + save_vector (null_distribution.col(i), output_prefix + "null_dist" + postfix(i) + ".txt"); + } + const matrix_type pvalue_output = MR::Math::Stats::fwe_pvalue (null_distribution, default_enhanced); + for (size_t i = 0; i != num_hypotheses; ++i) { + save_matrix (mat2vec.V2M (pvalue_output.col(i)), output_prefix + "fwe_1mpvalue" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (uncorrected_pvalues.col(i)), output_prefix + "uncorrected_pvalue" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (null_contributions.col(i)), output_prefix + "null_contributions" + postfix(i) + ".csv"); } - - save_vector (null_distribution, output_prefix + "_null_dist.txt"); - vector_type pvalue_output (num_edges); - Math::Stats::Permutation::statistic2pvalue (null_distribution, enhanced_output, pvalue_output); - save_matrix (mat2vec.V2M (pvalue_output), output_prefix + "_fwe_pvalue.csv"); - save_matrix (mat2vec.V2M (uncorrected_pvalues), output_prefix + "_uncorrected_pvalue.csv"); } diff --git a/cmd/dcmedit.cpp b/cmd/dcmedit.cpp index 63d8b028c9..5fd686bee7 100644 --- a/cmd/dcmedit.cpp +++ b/cmd/dcmedit.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "debug.h" #include "file/path.h" @@ -44,8 +45,8 @@ void usage () OPTIONS + Option ("anonymise", "remove any identifiable information, by replacing the following tags:\n" - " - any tag with Value Representation PN will be replaced with 'anonymous'\n" - " - tag (0010,0030) PatientBirthDate will be replaced with an empty string\n\n" + "- any tag with Value Representation PN will be replaced with 'anonymous'\n" + "- tag (0010,0030) PatientBirthDate will be replaced with an empty string\n" "WARNING: there is no guarantee that this command will remove all identiable " "information, since such information may be contained in any number " "of private vendor-specific tags. You will need to double-check the " @@ -104,8 +105,8 @@ void run () opt = get_options ("tag"); - if (opt.size()) - for (size_t n = 0; n < opt.size(); ++n) + if (opt.size()) + for (size_t n = 0; n < opt.size(); ++n) tags.push_back (Tag (read_hex (opt[n][0]), read_hex (opt[n][1]), opt[n][2])); opt = get_options ("id"); @@ -120,7 +121,7 @@ void run () VR.i = VRs[n]; INFO (std::string ("clearing entries with VR \"") + VR.c[1] + VR.c[0] + "\""); } - for (size_t n = 0; n < tags.size(); ++n) + for (size_t n = 0; n < tags.size(); ++n) INFO ("replacing tag (" + hex(tags[n].group) + "," + hex(tags[n].element) + ") with value \"" + tags[n].newvalue + "\""); File::Dicom::Element item; diff --git a/cmd/dcminfo.cpp b/cmd/dcminfo.cpp index 63b96a276b..048e26d613 100644 --- a/cmd/dcminfo.cpp +++ b/cmd/dcminfo.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "debug.h" #include "file/path.h" @@ -35,7 +36,8 @@ void usage () OPTIONS + Option ("all", "print all DICOM fields.") - + Option ("csa", "print all Siemens CSA fields") + + Option ("csa", "print all Siemens CSA fields (excluding Phoenix unless requested)") + + Option ("phoenix", "print Siemens Phoenix protocol information") + Option ("tag", "print field specified by the group & element tags supplied. " "Tags should be supplied as Hexadecimal (i.e. 
as they appear in the -all listing).") @@ -85,13 +87,17 @@ void run () File::Dicom::QuickScan reader; - if (get_options("all").size()) + const bool all = get_options("all").size(); + const bool csa = get_options("csa").size(); + const bool phoenix = get_options("phoenix").size(); + + if (all) print (File::Dicom::Element::print_header()); - if (reader.read (argument[0], get_options ("all").size(), get_options ("csa").size(), true)) + if (reader.read (argument[0], all, csa, phoenix, true)) throw Exception ("error reading file \"" + reader.filename + "\""); - if (!get_options ("all").size() && !get_options ("csa").size()) + if (!all && !csa && !phoenix) std::cout << reader; } diff --git a/cmd/dirflip.cpp b/cmd/dirflip.cpp index 03ab0ef20a..4d639420b8 100644 --- a/cmd/dirflip.cpp +++ b/cmd/dirflip.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "math/rng.h" @@ -32,8 +33,7 @@ void usage () { AUTHOR = "J-Donald Tournier (jdtournier@gmail.com)"; - SYNOPSIS = "Optimise the polarity of the directions in a scheme with respect to a " - "unipolar electrostatic repulsion model, by inversion of individual directions"; + SYNOPSIS = "Invert the polarity of individual directions so as to optimise a unipolar electrostatic repulsion model"; DESCRIPTION + "The orientations themselves are not affected, only their " @@ -46,7 +46,7 @@ void usage () OPTIONS - + Option ("permutations", "number of permutations to try.") + + Option ("permutations", "number of permutations to try (default: " + str(default_permutations) + ")") + Argument ("num").type_integer (1) + Option ("cartesian", "Output the directions in Cartesian coordinates [x y z] instead of [az el]."); diff --git a/cmd/dirgen.cpp b/cmd/dirgen.cpp index 9724807905..954de880b8 100644 --- a/cmd/dirgen.cpp +++ b/cmd/dirgen.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
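dirflip (above) and dirgen (whose diff begins here) both evaluate direction sets under electrostatic repulsion models, with dirflip specifically choosing per-direction polarities so as to optimise the unipolar energy. As a rough sketch, assuming a Coulomb-like 1/r interaction between direction endpoints (the exact energy definition used in the MRtrix3 source, for example its power term, may differ):

  #include <Eigen/Dense>

  // Electrostatic repulsion energy for a set of unit direction vectors
  // (one per row of `dirs`), under a 1/r potential.
  double repulsion_energy (const Eigen::MatrixXd& dirs, const bool bipolar)
  {
    double energy = 0.0;
    for (Eigen::Index i = 0; i != dirs.rows(); ++i) {
      for (Eigen::Index j = i + 1; j != dirs.rows(); ++j) {
        energy += 1.0 / (dirs.row(i) - dirs.row(j)).norm();
        if (bipolar)   // treat each direction and its antipode as equivalent
          energy += 1.0 / (dirs.row(i) + dirs.row(j)).norm();
      }
    }
    return energy;
  }

Under the bipolar model, flipping the sign of any direction leaves the energy unchanged, which is why dirflip optimises the unipolar form when polarity itself is the quantity of interest.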
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "math/rng.h" diff --git a/cmd/dirmerge.cpp b/cmd/dirmerge.cpp index a994a592bb..6c107a90e7 100644 --- a/cmd/dirmerge.cpp +++ b/cmd/dirmerge.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "math/rng.h" @@ -30,11 +31,10 @@ void usage () { AUTHOR = "J-Donald Tournier (jdtournier@gmail.com)"; -SYNOPSIS = "Splice or merge sets of directions over multiple shells into a single set, " - "in such a way as to maintain near-optimality upon truncation"; +SYNOPSIS = "Splice / merge multiple sets of directions in such a way as to maintain near-optimality upon truncation"; ARGUMENTS - + Argument ("subsets", "the number of subsets (phase-encode directions) per b-value").type_integer(1,10000) + + Argument ("subsets", "the number of subsets (eg. phase encoding directions) per b-value").type_integer(1,10000) + Argument ("bvalue files", "the b-value and sets of corresponding files, in order").type_text().allow_multiple() + Argument ("out", "the output directions file, with each row listing " "the X Y Z gradient directions, the b-value, and an index representing " diff --git a/cmd/dirorder.cpp b/cmd/dirorder.cpp index 368b4e2522..fad1655e63 100644 --- a/cmd/dirorder.cpp +++ b/cmd/dirorder.cpp @@ -1,22 +1,24 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "math/rng.h" #include "math/SH.h" +#include "dwi/gradient.h" #include "dwi/directions/file.h" #include @@ -29,9 +31,12 @@ void usage () { AUTHOR = "J-Donald Tournier (jdtournier@gmail.com)"; - SYNOPSIS = "Reorder a set of directions to ensure near-uniformity upon truncation - " - "i.e. if the scan is terminated early, the acquired directions are still " - "close to optimal"; + SYNOPSIS = "Reorder a set of directions to ensure near-uniformity upon truncation"; + + DESCRIPTION + + "The intent of this command is to reorder a set of gradient directions such that " + "if a scan is terminated prematurely, at any point, the acquired directions will " + "still be close to optimally distributed on the half-sphere."; ARGUMENTS + Argument ("input", "the input directions file").type_file_in() @@ -42,27 +47,16 @@ void usage () } + using value_type = double; - template -inline std::function get_rng_uniform (value_type from, value_type to) -{ - std::random_device rd; - std::mt19937 gen (rd()); - std::uniform_int_distribution dis (from, to); - return std::bind (dis, gen); -} - -void run () +vector optimise (const Eigen::MatrixXd& directions, const size_t first_volume) { - auto directions = DWI::Directions::load_cartesian (argument[0]); - auto rng = get_rng_uniform (0, directions.rows()-1); - - vector indices (1, rng()); - vector remaining; - for (ssize_t n = 0; n < directions.rows(); ++n) + vector indices (1, first_volume); + vector remaining; + for (size_t n = 0; n < size_t(directions.rows()); ++n) if (n != indices[0]) remaining.push_back (n); @@ -88,10 +82,55 @@ void run () remaining.erase (remaining.begin()+best); } + return indices; +} + + + + +value_type calc_cost (const Eigen::MatrixXd& directions, const vector& order) +{ + value_type cost = value_type(0); + const size_t start = Math::SH::NforL (2); + Eigen::MatrixXd subset (start, 3); + for (size_t i = 0; i != start; ++i) + subset.row(i) = directions.row(order[i]); + for (size_t N = start+1; N < size_t(directions.rows()); ++N) { + // Don't include condition numbers where precisely the number of coefficients + // for that spherical harmonic degree are included, as these tend to + // be outliers + const size_t lmax = Math::SH::LforN (N-1); + subset.conservativeResize (N, 3); + subset.row(N-1) = directions.row(order[N-1]); + const value_type cond = DWI::condition_number_for_lmax (subset, lmax); + cost += cond; + } + return cost; +} + + + + +void run () +{ + auto directions = DWI::Directions::load_cartesian (argument[0]); + + value_type min_cost = std::numeric_limits::infinity(); + vector best_order; + ProgressBar progress ("Determining best reordering", directions.rows()); + for (size_t first_volume = 0; first_volume != size_t(directions.rows()); ++first_volume) { + const vector order = optimise (directions, first_volume); + const value_type cost = calc_cost (directions, order); + if (cost < min_cost) { + min_cost = cost; + best_order = order; + } + ++progress; + } decltype(directions) output (directions.rows(), 3); for (ssize_t n 
= 0; n < directions.rows(); ++n) - output.row(n) = directions.row (indices[n]); + output.row(n) = directions.row (best_order[n]); DWI::Directions::save (output, argument[1], get_options("cartesian").size()); } diff --git a/cmd/dirsplit.cpp b/cmd/dirsplit.cpp index 36c4eed0c6..40408c5e0c 100644 --- a/cmd/dirsplit.cpp +++ b/cmd/dirsplit.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "math/rng.h" @@ -38,7 +39,7 @@ ARGUMENTS OPTIONS - + Option ("permutations", "number of permutations to try") + + Option ("permutations", "number of permutations to try (default: " + str(default_permutations) + ")") + Argument ("num").type_integer (1) + Option ("cartesian", "Output the directions in Cartesian coordinates [x y z] instead of [az el]."); diff --git a/cmd/dirstat.cpp b/cmd/dirstat.cpp index a99d53c13e..971931eddc 100644 --- a/cmd/dirstat.cpp +++ b/cmd/dirstat.cpp @@ -1,24 +1,27 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "header.h" -#include "dwi/directions/file.h" #include "dwi/gradient.h" #include "dwi/shells.h" +#include "math/sphere.h" + +#include "dwi/directions/file.h" @@ -51,15 +54,28 @@ void usage () "the direction set (important with respect to eddy-current resilience)." 
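The new dirorder implementation above tries every possible starting volume, greedily builds an ordering from it, and then scores each candidate ordering by summing spherical-harmonic fit condition numbers over nested leading subsets (calc_cost), keeping the ordering with the lowest total. The sketch below shows that scoring pattern in isolation; the design_for callable is an assumed placeholder standing in for construction of the SH fitting matrix of the first N reordered directions, not an MRtrix3 API:

  #include <cstddef>
  #include <Eigen/Dense>
  #include <Eigen/SVD>

  // Condition number of a (tall) design matrix: ratio of extreme singular values.
  double condition_number (const Eigen::MatrixXd& A)
  {
    Eigen::JacobiSVD<Eigen::MatrixXd> svd (A);
    const auto& s = svd.singularValues();
    return s(0) / s(s.size() - 1);
  }

  // Cost of an ordering: sum of condition numbers over nested leading subsets.
  template <class DesignBuilder>
  double prefix_cost (const DesignBuilder& design_for, const size_t start, const size_t total)
  {
    double cost = 0.0;
    for (size_t N = start; N <= total; ++N)
      cost += condition_number (design_for (N));   // design matrix for the first N directions
    return cost;
  }

Summing over all prefixes is what encodes the command's goal: the acquisition should remain well-conditioned no matter where it is truncated, not just once the full set has been acquired.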
+ "Specific metrics can also be queried independently via the \"-output\" " - "option, using these shorthands: U/B for unipolar/bipolar model, E/N " - "for energy and nearest-neighbour respectively, t/-/+ for total/min/max " - "respectively (mean implied otherwise); SHn for condition number of SH fit " - "at order n (with n an even integer); ASYM for asymmetry index (norm of " - "mean direction vector); and N for the number of directions. For example:" - + "-output BN,BN-,BN+ requests the mean, min and max nearest-neighour " - "angles assuming a bipolar model." - + "-output UE,SH8,SYM requests the mean unipolar electrostatic energy, " - "condition number of SH fit at order 8, and the asymmetry index."; + "option, using these shorthands: \n" + "U/B for unipolar/bipolar model, \n" + "E/N for energy and nearest-neighbour respectively, \n" + "t/-/+ for total/min/max respectively (mean implied otherwise); \n" + "SHn for condition number of SH fit at order n (with n an even integer); \n" + "ASYM for asymmetry index (norm of mean direction vector); \n" + "N for the number of directions."; + + EXAMPLES + + Example ("Default usage", + "dirstat directions.txt", + "This provides a pretty-printed list of all metrics available.") + + + Example ("Write a single metric of interest to standard output", + "dirstat grad.b -shell 3000 -output SH8", + "requests the condition number of SH fit of b=3000 shell " + "directions at SH order 8") + + + Example ("Write multiple metrics of interest to standard output", + "dirstat dwi.mif -output BN,BN-,BN+", + "requests the mean, min and max nearest-neighour " + "angles assuming a bipolar model."); ARGUMENTS + Argument ("dirs", "the text file or image containing the directions.").type_file_in(); @@ -95,7 +111,7 @@ void run () } catch (Exception& E) { auto header = Header::open (argument[0]); - directions = DWI::get_valid_DW_scheme (header); + directions = DWI::get_DW_scheme (header); } } @@ -183,7 +199,7 @@ Metrics compute (Eigen::MatrixXd& directions) { if (directions.cols() < 3) throw Exception ("unexpected matrix size for scheme \"" + str(argument[0]) + "\""); - DWI::normalise_grad (directions); + Math::Sphere::normalise_cartesian (directions); vector NN_bipolar (directions.rows(), -1.0); vector NN_unipolar (directions.rows(), -1.0); diff --git a/cmd/dwi2adc.cpp b/cmd/dwi2adc.cpp index 1c8976f9cb..f6e5f4f2ad 100644 --- a/cmd/dwi2adc.cpp +++ b/cmd/dwi2adc.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "command.h" #include "image.h" #include "phase_encoding.h" @@ -32,11 +33,11 @@ void usage () SYNOPSIS = "Convert mean dwi (trace-weighted) images to mean ADC maps"; - ARGUMENTS + ARGUMENTS + Argument ("input", "the input image.").type_image_in () + Argument ("output", "the output image.").type_image_out (); - OPTIONS + OPTIONS + DWI::GradImportOptions(); } @@ -50,8 +51,8 @@ class DWI2ADC { MEMALIGN(DWI2ADC) public: DWI2ADC (const Eigen::MatrixXd& binv, size_t dwi_axis) : dwi (binv.cols()), - adc (2), - binv (binv), + adc (2), + binv (binv), dwi_axis (dwi_axis) { } template @@ -80,7 +81,7 @@ class DWI2ADC { MEMALIGN(DWI2ADC) void run () { auto dwi = Header::open (argument[0]).get_image(); - auto grad = DWI::get_valid_DW_scheme (dwi); + auto grad = DWI::get_DW_scheme (dwi); size_t dwi_axis = 3; while (dwi.size (dwi_axis) < 2) @@ -97,10 +98,10 @@ void run () { Header header (dwi); header.datatype() = DataType::Float32; - header.ndim() = 4; - header.size(3) = 2; DWI::stash_DW_scheme (header, grad); PhaseEncoding::clear_scheme (header); + header.ndim() = 4; + header.size(3) = 2; auto adc = Image::create (argument[1], header); diff --git a/cmd/dwi2fod.cpp b/cmd/dwi2fod.cpp index fbb254c874..26d1cee739 100644 --- a/cmd/dwi2fod.cpp +++ b/cmd/dwi2fod.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "header.h" #include "image.h" @@ -20,9 +21,10 @@ #include "algo/threaded_loop.h" #include "dwi/gradient.h" #include "dwi/shells.h" +#include "math/SH.h" + #include "dwi/sdeconv/csd.h" #include "dwi/sdeconv/msmt_csd.h" -#include "math/SH.h" using namespace MR; @@ -47,17 +49,15 @@ OptionGroup CommonOptions = OptionGroup ("Options common to more than one algori "provided as a comma-separated list of integers, one for " "each output image; for single-output algorithms, only " "a single integer should be provided. 
If omitted, the " - "command will use the highest possible lmax given the " - "diffusion gradient table, up to a maximum of 8.") + "command will use the lmax of the corresponding response " + "function (i.e based on its number of coefficients), " + "up to a maximum of 8.") + Argument ("order").type_sequence_int() + Option ("mask", "only perform computation within the specified binary brain mask image.") + Argument ("image").type_image_in(); - - - void usage () { AUTHOR = "J-Donald Tournier (jdtournier@gmail.com) and Ben Jeurissen (ben.jeurissen@uantwerpen.be)"; @@ -67,6 +67,22 @@ void usage () DESCRIPTION + Math::SH::encoding_description; + EXAMPLES + + Example ("Perform single-shell single-tissue CSD", + "dwi2fod csd dwi.mif response_wm.txt wmfod.mif", + "This algorithm is designed for single-shell data and only uses a single " + "b-value. The response function text file provided should only contain a " + "a single row, corresponding to the b-value used for CSD.") + + + Example ("Perform multi-shell multi-tissue CSD", + "dwi2fod msmt_csd dwi.mif response_wm.txt wmfod.mif response_gm.txt gm.mif response_csf.txt csf.mif", + "This example is the most common use case of multi-tissue CSD, estimating " + "a white matter FOD, and grey matter and CSF compartments. This algorithm " + "requires at least three unique b-values to estimate three tissue compartments. " + "Each response function text file should have a number of rows equal to the " + "number of b-values used. If only two unique b-values are available, it's also " + "possible to estimate only two tissue compartments, e.g., white matter and CSF."); + REFERENCES + "* If using csd algorithm:\n" "Tournier, J.-D.; Calamante, F. & Connelly, A. " // Internal @@ -76,12 +92,12 @@ void usage () + "* If using msmt_csd algorithm:\n" "Jeurissen, B; Tournier, J-D; Dhollander, T; Connelly, A & Sijbers, J. " // Internal - "Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data " + "Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data. " "NeuroImage, 2014, 103, 411-426" + "Tournier, J.-D.; Calamante, F., Gadian, D.G. & Connelly, A. " // Internal "Direct estimation of the fiber orientation density function from " - "diffusion-weighted MRI data using spherical deconvolution." + "diffusion-weighted MRI data using spherical deconvolution. 
" "NeuroImage, 2004, 23, 1176-1185"; ARGUMENTS @@ -95,6 +111,7 @@ void usage () + DWI::ShellsOption + CommonOptions + DWI::SDeconv::CSD_options + + DWI::SDeconv::MSMT_CSD_options + Stride::Options; } @@ -126,7 +143,7 @@ class CSD_Processor { MEMALIGN(CSD_Processor) INFO ("voxel [ " + str (dwi.index(0)) + " " + str (dwi.index(1)) + " " + str (dwi.index(2)) + " ] did not reach full convergence"); - write_back (fod); + fod.row(3) = sdeconv.FOD(); } @@ -156,11 +173,6 @@ class CSD_Processor { MEMALIGN(CSD_Processor) } - void write_back (Image& fod) { - for (auto l = Loop (3) (fod); l; ++l) - fod.value() = sdeconv.FOD() [fod.index(3)]; - } - }; @@ -168,10 +180,12 @@ class CSD_Processor { MEMALIGN(CSD_Processor) class MSMT_Processor { MEMALIGN (MSMT_Processor) public: - MSMT_Processor (const DWI::SDeconv::MSMT_CSD::Shared& shared, Image& mask_image, vector< Image > odf_images) : + MSMT_Processor (const DWI::SDeconv::MSMT_CSD::Shared& shared, Image& mask_image, + vector< Image > odf_images, Image dwi_modelled = Image()) : sdeconv (shared), mask_image (mask_image), odf_images (odf_images), + modelled_image (dwi_modelled), dwi_data (shared.grad.rows()), output_data (shared.problem.H.cols()) { } @@ -184,8 +198,7 @@ class MSMT_Processor { MEMALIGN (MSMT_Processor) return; } - for (auto l = Loop (3) (dwi_image); l; ++l) - dwi_data[dwi_image.index(3)] = dwi_image.value(); + dwi_data = dwi_image.row(3); sdeconv (dwi_data, output_data); if (sdeconv.niter >= sdeconv.shared.problem.max_niter) { @@ -199,6 +212,12 @@ class MSMT_Processor { MEMALIGN (MSMT_Processor) for (auto l = Loop(3)(odf_images[i]); l; ++l) odf_images[i].value() = output_data[j++]; } + + if (modelled_image.valid()) { + assign_pos_of (dwi_image, 0, 3).to (modelled_image); + dwi_data = sdeconv.shared.problem.H * output_data; + modelled_image.row(3) = dwi_data; + } } @@ -206,6 +225,7 @@ class MSMT_Processor { MEMALIGN (MSMT_Processor) DWI::SDeconv::MSMT_CSD sdeconv; Image mask_image; vector< Image > odf_images; + Image modelled_image; Eigen::VectorXd dwi_data; Eigen::VectorXd output_data; }; @@ -249,9 +269,10 @@ void run () shared.init(); - header_out.size(3) = shared.nSH(); DWI::stash_DW_scheme (header_out, shared.grad); PhaseEncoding::clear_scheme (header_out); + + header_out.size(3) = shared.nSH(); auto fod = Image::create (argument[3], header_out); CSD_Processor processor (shared, mask); @@ -291,9 +312,17 @@ void run () odfs.push_back (Image (Image::create (odf_paths[i], header_out))); } - MSMT_Processor processor (shared, mask, odfs); + Image dwi_modelled; + auto opt = get_options ("predicted_signal"); + if (opt.size()) + dwi_modelled = Image::create (opt[0][0], header_in); + + MSMT_Processor processor (shared, mask, odfs, dwi_modelled); auto dwi = header_in.get_image().with_direct_io (3); - ThreadedLoop ("performing multi-shell, multi-tissue CSD", dwi, 0, 3) + ThreadedLoop ("performing MSMT CSD (" + + str(shared.num_shells()) + " shell" + (shared.num_shells() > 1 ? "s" : "") + ", " + + str(num_tissues) + " tissue" + (num_tissues > 1 ? "s" : "") + ")", + dwi, 0, 3) .run (processor, dwi); } else { diff --git a/cmd/dwi2mask.cpp b/cmd/dwi2mask.cpp index b287a055d1..a3cf870556 100644 --- a/cmd/dwi2mask.cpp +++ b/cmd/dwi2mask.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "filter/dwi_brain_mask.h" @@ -41,7 +42,7 @@ DESCRIPTION REFERENCES + "Dhollander T, Raffelt D, Connelly A. " // Internal "Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. " - "ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5."; + "ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5"; ARGUMENTS + Argument ("input", @@ -66,7 +67,7 @@ OPTIONS void run () { auto input = Image::open (argument[0]).with_direct_io (3); - auto grad = DWI::get_valid_DW_scheme (input); + auto grad = DWI::get_DW_scheme (input); if (input.ndim() != 4) throw Exception ("input DWI image must be 4D"); @@ -77,8 +78,6 @@ void run () { dwi_brain_mask_filter (input, temp_mask); Header H_out (temp_mask); - DWI::stash_DW_scheme (H_out, grad); - PhaseEncoding::clear_scheme (H_out); auto output = Image::create (argument[1], H_out); unsigned int scale = get_option_value ("clean_scale", DEFAULT_CLEAN_SCALE); diff --git a/cmd/dwi2tensor.cpp b/cmd/dwi2tensor.cpp index 4746503d86..87c58c0fa7 100644 --- a/cmd/dwi2tensor.cpp +++ b/cmd/dwi2tensor.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "command.h" #include "phase_encoding.h" #include "progressbar.h" @@ -28,64 +29,100 @@ using value_type = float; #define DEFAULT_NITER 2 -const char* encoding_description = - "The tensor coefficients are stored in the output image as follows: \n" - "volumes 0-5: D11, D22, D33, D12, D13, D23 ; \n\n" - "If diffusion kurtosis is estimated using the -dkt option, these are stored as follows: \n" - "volumes 0-2: W1111, W2222, W3333 ; \n" - "volumes 3-8: W1112, W1113, W1222, W1333, W2223, W2333 ; \n" - "volumes 9-11: W1122, W1133, W2233 ; \n" - "volumes 12-14: W1123, W1223, W1233 ;"; +const char* const encoding_description[] = { + "The tensor coefficients are stored in the output image as follows:\n" + "volumes 0-5: D11, D22, D33, D12, D13, D23", + "If diffusion kurtosis is estimated using the -dkt option, these are stored as follows:\n" + "volumes 0-2: W1111, W2222, W3333\n" + "volumes 3-8: W1112, W1113, W1222, W1333, W2223, W2333\n" + "volumes 9-11: W1122, W1133, W2233\n" + "volumes 12-14: W1123, W1223, W1233", + nullptr +}; void usage () { + AUTHOR = "Ben Jeurissen (ben.jeurissen@uantwerpen.be)"; - REFERENCES - + "Veraart, J.; Sijbers, J.; Sunaert, S.; Leemans, A. & Jeurissen, B. " // Internal - "Weighted linear least squares estimation of diffusion MRI parameters: strengths, limitations, and pitfalls. " - "NeuroImage, 2013, 81, 335-346"; + SYNOPSIS = "Diffusion (kurtosis) tensor estimation"; + + DESCRIPTION + + "By default, the diffusion tensor (and optionally its kurtosis) is fitted to " + "the log-signal in two steps: firstly, using weighted least-squares (WLS) with " + "weights based on the empirical signal intensities; secondly, by further iterated weighted " + "least-squares (IWLS) with weights determined by the signal predictions from the " + "previous iteration (by default, 2 iterations will be performed). This behaviour can " + "be altered in two ways:" + + + "* The -ols option will cause the first fitting step to be performed using ordinary " + "least-squares (OLS); that is, all measurements contribute equally to the fit, instead of " + "the default behaviour of weighting based on the empirical signal intensities." + + + "* The -iter option controls the number of iterations of the IWLS prodedure. If this is " + "set to zero, then the output model parameters will be those resulting from the first " + "fitting step only: either WLS by default, or OLS if the -ols option is used in conjunction " + "with -iter 0." 
+ + + encoding_description; ARGUMENTS - + Argument ("dwi", "the input dwi image.").type_image_in () - + Argument ("dt", "the output dt image.").type_image_out (); - - OPTIONS - + Option ("mask", "only perform computation within the specified binary brain mask image.") - + Argument ("image").type_image_in() - + Option ("b0", "the output b0 image.") - + Argument ("image").type_image_out() - + Option ("dkt", "the output dkt image.") - + Argument ("image").type_image_out() - + Option ("iter","number of iterative reweightings (default: " + str(DEFAULT_NITER) + "); set to 0 for ordinary linear least squares.") - + Argument ("integer").type_integer (0, 10) - + Option ("predicted_signal", "the predicted dwi image.") - + Argument ("image").type_image_out() - + DWI::GradImportOptions(); - - AUTHOR = "Ben Jeurissen (ben.jeurissen@uantwerpen.be)"; - - SYNOPSIS = "Diffusion (kurtosis) tensor estimation using iteratively reweighted linear least squares estimator"; + + Argument ("dwi", "the input dwi image.").type_image_in () + + Argument ("dt", "the output dt image.").type_image_out (); - DESCRIPTION - + encoding_description; + OPTIONS + + Option ("ols", "perform initial fit using an ordinary least-squares (OLS) fit (see Description).") + + Option ("mask", "only perform computation within the specified binary brain mask image.") + + Argument ("image").type_image_in() + + + Option ("b0", "the output b0 image.") + + Argument ("image").type_image_out() + + + Option ("dkt", "the output dkt image.") + + Argument ("image").type_image_out() + + + Option ("iter","number of iterative reweightings for IWLS algorithm (default: " + + str(DEFAULT_NITER) + ") (see Description).") + + Argument ("integer").type_integer (0, 10) + + + Option ("predicted_signal", "the predicted dwi image.") + + Argument ("image").type_image_out() + + + DWI::GradImportOptions(); + + REFERENCES + + "References based on fitting algorithm used:" + + + "* OLS, WLS:\n" + "Basser, P.J.; Mattiello, J.; LeBihan, D. " + "Estimation of the effective self-diffusion tensor from the NMR spin echo. " + "J Magn Reson B., 1994, 103, 247–254." + + + "* IWLS:\n" + "Veraart, J.; Sijbers, J.; Sunaert, S.; Leemans, A. & Jeurissen, B. " // Internal + "Weighted linear least squares estimation of diffusion MRI parameters: strengths, limitations, and pitfalls. " + "NeuroImage, 2013, 81, 335-346"; } + + template class Processor { MEMALIGN(Processor) public: - Processor (const Eigen::MatrixXd& b, const int iter, MASKType* mask_image, B0Type* b0_image, DKTType* dkt_image, PredictType* predict_image) : + Processor (const Eigen::MatrixXd& b, const bool ols, const int iter, + MASKType* mask_image, B0Type* b0_image, DKTType* dkt_image, PredictType* predict_image) : mask_image (mask_image), b0_image (b0_image), dkt_image (dkt_image), predict_image (predict_image), dwi(b.rows()), p(b.cols()), - w(b.rows()), + w(Eigen::VectorXd::Ones (b.rows())), work(b.cols(),b.cols()), llt(work.rows()), b(b), + ols (ols), maxit(iter) { } template @@ -96,36 +133,35 @@ class Processor { MEMALIGN(Processor) if (!mask_image->value()) return; } - + for (auto l = Loop (3) (dwi_image); l; ++l) dwi[dwi_image.index(3)] = dwi_image.value(); - - double small_intensity = 1.0e-6 * dwi.maxCoeff(); + + double small_intensity = 1.0e-6 * dwi.maxCoeff(); for (int i = 0; i < dwi.rows(); i++) { if (dwi[i] < small_intensity) dwi[i] = small_intensity; + w[i] = ( ols ? 
1.0 : dwi[i] ); dwi[i] = std::log (dwi[i]); } - - work.setZero(); - work.selfadjointView().rankUpdate (b.transpose()); - p = llt.compute (work.selfadjointView()).solve(b.transpose()*dwi); - for (int it = 0; it < maxit; it++) { - w = (b*p).array().exp(); + + for (int it = 0; it <= maxit; it++) { work.setZero(); work.selfadjointView().rankUpdate (b.transpose()*w.asDiagonal()); p = llt.compute (work.selfadjointView()).solve(b.transpose()*w.asDiagonal()*w.asDiagonal()*dwi); + if (maxit > 1) + w = (b*p).array().exp(); + } + + for (auto l = Loop(3)(dt_image); l; ++l) { + dt_image.value() = p[dt_image.index(3)]; } - + if (b0_image) { assign_pos_of (dwi_image, 0, 3).to (*b0_image); b0_image->value() = exp(p[6]); } - - for (auto l = Loop(3)(dt_image); l; ++l) { - dt_image.value() = p[dt_image.index(3)]; - } - + if (dkt_image) { assign_pos_of (dwi_image, 0, 3).to (*dkt_image); double adc_sq = (p[0]+p[1]+p[2])*(p[0]+p[1]+p[2])/9.0; @@ -133,7 +169,7 @@ class Processor { MEMALIGN(Processor) dkt_image->value() = p[dkt_image->index(3)+7]/adc_sq; } } - + if (predict_image) { assign_pos_of (dwi_image, 0, 3).to (*predict_image); dwi = (b*p).array().exp(); @@ -141,7 +177,7 @@ class Processor { MEMALIGN(Processor) predict_image->value() = dwi[predict_image->index(3)]; } } - + } private: @@ -155,26 +191,30 @@ class Processor { MEMALIGN(Processor) Eigen::MatrixXd work; Eigen::LLT llt; const Eigen::MatrixXd& b; + const bool ols; const int maxit; }; -template -inline Processor processor (const Eigen::MatrixXd& b, const int& iter, MASKType* mask_image, B0Type* b0_image, DKTType* dkt_image, PredictType* predict_image) { - return { b, iter, mask_image, b0_image, dkt_image, predict_image }; +template +inline Processor processor (const Eigen::MatrixXd& b, const bool ols, const int iter, MASKType* mask_image, B0Type* b0_image, DKTType* dkt_image, PredictType* predict_image) { + return { b, ols, iter, mask_image, b0_image, dkt_image, predict_image }; } void run () { auto dwi = Header::open (argument[0]).get_image(); - auto grad = DWI::get_valid_DW_scheme (dwi); - + auto grad = DWI::get_DW_scheme (dwi); + Image* mask = nullptr; auto opt = get_options ("mask"); if (opt.size()) { mask = new Image (Image::open (opt[0][0])); check_dimensions (dwi, *mask, 0, 3); } - + + bool ols = get_options ("ols").size(); + + // depending on whether first (initialisation) loop should be considered an iteration auto iter = get_option_value ("iter", DEFAULT_NITER); Header header (dwi); @@ -182,13 +222,12 @@ void run () header.ndim() = 4; DWI::stash_DW_scheme (header, grad); PhaseEncoding::clear_scheme (header); - + Image* predict = nullptr; opt = get_options ("predicted_signal"); - if (opt.size()) { + if (opt.size()) predict = new Image (Image::create (opt[0][0], header)); - } - + header.size(3) = 6; auto dt = Image::create (argument[1], header); @@ -206,9 +245,9 @@ void run () header.size(3) = 15; dkt = new Image (Image::create (opt[0][0], header)); } - + Eigen::MatrixXd b = -DWI::grad2bmatrix (grad, opt.size()>0); - ThreadedLoop("computing tensors", dwi, 0, 3).run (processor (b, iter, mask, b0, dkt, predict), dwi, dt); + ThreadedLoop("computing tensors", dwi, 0, 3).run (processor (b, ols, iter, mask, b0, dkt, predict), dwi, dt); } diff --git a/cmd/dwidenoise.cpp b/cmd/dwidenoise.cpp index a94c96f400..4c563f9339 100644 --- a/cmd/dwidenoise.cpp +++ b/cmd/dwidenoise.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2018 the MRtrix3 contributors. + * Copyright (c) 2008-2019 the MRtrix3 contributors. 
* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this @@ -15,31 +15,42 @@ #include "command.h" #include "image.h" + #include #include -#define DEFAULT_SIZE 5 using namespace MR; using namespace App; +const char* const dtypes[] = { "float32", "float64", NULL }; + +const char* const estimators[] = { "exp1", "exp2", NULL }; + void usage () { - SYNOPSIS = "Denoise DWI data and estimate the noise level based on the optimal threshold for PCA"; - + SYNOPSIS = "dMRI noise level estimation and denoising using Marchenko-Pastur PCA"; + DESCRIPTION + "DWI data denoising and noise map estimation by exploiting data redundancy in the PCA domain " - "using the prior knowledge that the eigenspectrum of random covariance matrices is described by " - "the universal Marchenko Pastur distribution." + "using the prior knowledge that the eigenspectrum of random covariance matrices is described by " + "the universal Marchenko-Pastur (MP) distribution. Fitting the MP distribution to the spectrum " + "of patch-wise signal matrices hence provides an estimator of the noise level 'sigma', as was " + "first shown in Veraart et al. (2016) and later improved in Cordero-Grande et al. (2019). This " + "noise level estimate then determines the optimal cut-off for PCA denoising." + "Important note: image denoising must be performed as the first step of the image processing pipeline. " - "The routine will fail if interpolation or smoothing has been applied to the data prior to denoising." - - + "Note that this function does not correct for non-Gaussian noise biases."; - - AUTHOR = "Daan Christiaens (daan.christiaens@kcl.ac.uk) & Jelle Veraart (jelle.veraart@nyumc.org) & J-Donald Tournier (jdtournier@gmail.com)"; - + "The routine will fail if interpolation or smoothing has been applied to the data prior to denoising." + + + "Note that this function does not correct for non-Gaussian noise biases present in " + "magnitude-reconstructed MRI images. If available, including the MRI phase data can " + "reduce such non-Gaussian biases, and the command now supports complex input data."; + + AUTHOR = "Daan Christiaens (daan.christiaens@kcl.ac.uk) & " + "Jelle Veraart (jelle.veraart@nyumc.org) & " + "J-Donald Tournier (jdtournier@gmail.com)"; + REFERENCES + "Veraart, J.; Novikov, D.S.; Christiaens, D.; Ades-aron, B.; Sijbers, J. & Fieremans, E. " // Internal "Denoising of diffusion MRI using random matrix theory. " @@ -47,8 +58,12 @@ void usage () + "Veraart, J.; Fieremans, E. & Novikov, D.S. " // Internal "Diffusion MRI noise mapping using random matrix theory. " - "Magn. Res. Med., 2016, 76(5), 1582-1593, doi: 10.1002/mrm.26059"; - + "Magn. Res. Med., 2016, 76(5), 1582-1593, doi: 10.1002/mrm.26059" + + + "Cordero-Grande, L.; Christiaens, D.; Hutter, J.; Price, A.N.; Hajnal, J.V. " // Internal + "Complex diffusion-weighted image estimation via matrix recovery under general noise models. " + "NeuroImage, 2019, 200, 391-404, doi: 10.1016/j.neuroimage.2019.06.039"; + ARGUMENTS + Argument ("dwi", "the input diffusion-weighted image.").type_image_in () @@ -56,14 +71,29 @@ void usage () OPTIONS - + Option ("mask", "only perform computation within the specified binary brain mask image.") + + Option ("mask", "Only process voxels within the specified binary brain mask image.") + Argument ("image").type_image_in() - + Option ("extent", "set the window size of the denoising filter. 
(default = " + str(DEFAULT_SIZE) + "," + str(DEFAULT_SIZE) + "," + str(DEFAULT_SIZE) + ")") + + Option ("extent", "Set the patch size of the denoising filter. " + "By default, the command will select the smallest isotropic patch size " + "that exceeds the number of DW images in the input data, e.g., 5x5x5 for " + "data with <= 125 DWI volumes, 7x7x7 for data with <= 343 DWI volumes, etc.") + Argument ("window").type_sequence_int () - + Option ("noise", "the output noise map.") - + Argument ("level").type_image_out(); + + Option ("noise", "The output noise map, i.e., the estimated noise level 'sigma' in the data. " + "Note that on complex input data, this will be the total noise level across " + "real and imaginary channels, so a scale factor sqrt(2) applies.") + + Argument ("level").type_image_out() + + + Option ("datatype", "Datatype for the eigenvalue decomposition (single or double precision). " + "For complex input data, this will select complex float32 or complex float64 datatypes.") + + Argument ("float32/float64").type_choice(dtypes) + + + Option ("estimator", "Select the noise level estimator (default = Exp2), either: \n" + "* Exp1: the original estimator used in Veraart et al. (2016), or \n" + "* Exp2: the improved estimator introduced in Cordero-Grande et al. (2019).") + + Argument ("Exp1/Exp2").type_choice(estimators); + COPYRIGHT = "Copyright (c) 2016 New York University, University of Antwerp, and the MRtrix3 contributors \n \n" "Permission is hereby granted, free of charge, to any non-commercial entity ('Recipient') obtaining a copy of this software and " @@ -81,29 +111,35 @@ void usage () "licenses under any patents or patent application owned by NYU. \n \n" "\t 5. The Software may only be used for non-commercial research and may not be used for clinical care. \n \n" "\t 6. 
Any publication by Recipient of research involving the Software shall cite the references listed below."; - + } -using value_type = float; +using real_type = float; + + +template +class DenoisingFunctor { + MEMALIGN(DenoisingFunctor) +public: -template -class DenoisingFunctor { MEMALIGN(DenoisingFunctor) - public: - DenoisingFunctor (ImageType& dwi, vector extent, Image& mask, ImageType& noise) + using MatrixType = Eigen::Matrix; + using SValsType = Eigen::VectorXd; + + DenoisingFunctor (int ndwi, const vector& extent, + Image& mask, Image& noise, bool exp1) : extent {{extent[0]/2, extent[1]/2, extent[2]/2}}, - m (dwi.size(3)), - n (extent[0]*extent[1]*extent[2]), - r ((m void operator () (ImageType& dwi, ImageType& out) { + // Process voxels in mask only if (mask.valid()) { assign_pos_of (dwi).to (mask); if (!mask.value()) @@ -114,106 +150,130 @@ class DenoisingFunctor { MEMALIGN(DenoisingFunctor) load_data (dwi); // Compute Eigendecomposition: - Eigen::MatrixXf XtX (r,r); + MatrixType XtX (r,r); if (m <= n) - XtX.template triangularView() = X * X.transpose(); - else - XtX.template triangularView() = X.transpose() * X; - Eigen::SelfAdjointEigenSolver eig (XtX); - // eigenvalues provide squared singular values: - Eigen::VectorXf s = eig.eigenvalues(); - + XtX.template triangularView() = X * X.adjoint(); + else + XtX.template triangularView() = X.adjoint() * X; + Eigen::SelfAdjointEigenSolver eig (XtX); + // eigenvalues sorted in increasing order: + SValsType s = eig.eigenvalues().template cast(); + // Marchenko-Pastur optimal threshold - const double lam_r = s[0] / n; + const double lam_r = std::max(s[0], 0.0) / q; double clam = 0.0; - sigma2 = NaN; + sigma2 = 0.0; ssize_t cutoff_p = 0; - for (ssize_t p = 0; p < r; ++p) - { - double lam = s[p] / n; + for (ssize_t p = 0; p < r; ++p) // p+1 is the number of noise components + { // (as opposed to the paper where p is defined as the number of signal components) + double lam = std::max(s[p], 0.0) / q; clam += lam; - double gam = double(m-r+p+1) / double(n); - double sigsq1 = clam / (p+1) / std::max (gam, 1.0); - double sigsq2 = (lam - lam_r) / 4 / std::sqrt(gam); + double gam = double(p+1) / (exp1 ? 
q : q-(r-p-1)); + double sigsq1 = clam / double(p+1); + double sigsq2 = (lam - lam_r) / (4.0 * std::sqrt(gam)); // sigsq2 > sigsq1 if signal else noise if (sigsq2 < sigsq1) { sigma2 = sigsq1; cutoff_p = p+1; - } + } } if (cutoff_p > 0) { // recombine data using only eigenvectors above threshold: s.head (cutoff_p).setZero(); s.tail (r-cutoff_p).setOnes(); - if (m <= n) - X.col (n/2) = eig.eigenvectors() * ( s.asDiagonal() * ( eig.eigenvectors().adjoint() * X.col(n/2) )); - else - X.col (n/2) = X * ( eig.eigenvectors() * ( s.asDiagonal() * eig.eigenvectors().adjoint().col(n/2) )); + if (m <= n) + X.col (n/2) = eig.eigenvectors() * ( s.cast().asDiagonal() * ( eig.eigenvectors().adjoint() * X.col(n/2) )); + else + X.col (n/2) = X * ( eig.eigenvectors() * ( s.cast().asDiagonal() * eig.eigenvectors().adjoint().col(n/2) )); } // Store output assign_pos_of(dwi).to(out); - for (auto l = Loop (3) (out); l; ++l) - out.value() = X(out.index(3), n/2); + out.row(3) = X.col(n/2); // store noise map if requested: if (noise.valid()) { assign_pos_of(dwi).to(noise); - noise.value() = value_type (std::sqrt(sigma2)); + noise.value() = real_type (std::sqrt(sigma2)); } } - - - void load_data (ImageType& dwi) - { + +private: + const std::array extent; + const ssize_t m, n, r, q; + const bool exp1; + MatrixType X; + std::array pos; + double sigma2; + Image mask; + Image noise; + + template + void load_data (ImageType& dwi) { pos[0] = dwi.index(0); pos[1] = dwi.index(1); pos[2] = dwi.index(2); + // fill patch X.setZero(); - ssize_t k = 0; - for (dwi.index(2) = pos[2]-extent[2]; dwi.index(2) <= pos[2]+extent[2]; ++dwi.index(2)) - for (dwi.index(1) = pos[1]-extent[1]; dwi.index(1) <= pos[1]+extent[1]; ++dwi.index(1)) - for (dwi.index(0) = pos[0]-extent[0]; dwi.index(0) <= pos[0]+extent[0]; ++dwi.index(0), ++k) - if (! 
is_out_of_bounds(dwi,0,3)) - X.col(k) = dwi.row(3); + size_t k = 0; + for (int z = -extent[2]; z <= extent[2]; z++) { + dwi.index(2) = wrapindex(z, 2, dwi.size(2)); + for (int y = -extent[1]; y <= extent[1]; y++) { + dwi.index(1) = wrapindex(y, 1, dwi.size(1)); + for (int x = -extent[0]; x <= extent[0]; x++, k++) { + dwi.index(0) = wrapindex(x, 0, dwi.size(0)); + X.col(k) = dwi.row(3); + } + } + } // reset image position dwi.index(0) = pos[0]; dwi.index(1) = pos[1]; dwi.index(2) = pos[2]; } - -private: - const std::array extent; - const ssize_t m, n, r; - Eigen::MatrixXf X; - std::array pos; - double sigma2; - Image mask; - ImageType noise; - + + inline size_t wrapindex(int r, int axis, int max) const { + // patch handling at image edges + int rr = pos[axis] + r; + if (rr < 0) rr = extent[axis] - r; + if (rr >= max) rr = (max-1) - extent[axis] - r; + return rr; + } + }; +template +void process_image (Header& data, Image& mask, Image noise, + const std::string& output_name, const vector& extent, bool exp1) + { + auto input = data.get_image().with_direct_io(3); + // create output + Header header (data); + header.datatype() = DataType::from(); + auto output = Image::create (output_name, header); + // run + DenoisingFunctor func (data.size(3), extent, mask, noise, exp1); + ThreadedLoop ("running MP-PCA denoising", data, 0, 3).run (func, input, output); + } + + void run () { - auto dwi_in = Image::open (argument[0]).with_direct_io(3); + auto dwi = Header::open (argument[0]); - if (dwi_in.ndim() != 4 || dwi_in.size(3) <= 1) + if (dwi.ndim() != 4 || dwi.size(3) <= 1) throw Exception ("input image must be 4-dimensional"); Image mask; auto opt = get_options ("mask"); if (opt.size()) { mask = Image::open (opt[0][0]); - check_dimensions (mask, dwi_in, 0, 3); + check_dimensions (mask, dwi, 0, 3); } - auto header = Header (dwi_in); - header.datatype() = DataType::Float32; - auto dwi_out = Image::create (argument[1], header); - opt = get_options("extent"); - vector extent = { DEFAULT_SIZE, DEFAULT_SIZE, DEFAULT_SIZE }; + vector extent; if (opt.size()) { extent = parse_ints(opt[0][0]); if (extent.size() == 1) @@ -223,18 +283,49 @@ void run () for (auto &e : extent) if (!(e & 1)) throw Exception ("-extent must be a (list of) odd numbers"); + INFO("user defined patch size " + str(extent[0]) + " x " + str(extent[1]) + " x " + str(extent[2]) + "."); + } else { + int e = 1; + while (e*e*e < dwi.size(3)) + e += 2; + extent = {e, e, e}; + INFO("select default patch size " + str(e) + " x " + str(e) + " x " + str(e) + "."); } - - Image noise; + + bool exp1 = get_option_value("estimator", 1) == 0; // default: Exp2 (unbiased estimator) + + Image noise; opt = get_options("noise"); if (opt.size()) { + Header header (dwi); header.ndim() = 3; - noise = Image::create (opt[0][0], header); + header.datatype() = DataType::Float32; + noise = Image::create (opt[0][0], header); + } + + int prec = get_option_value("datatype", 0); // default: single precision + if (dwi.datatype().is_complex()) prec += 2; // support complex input data + switch (prec) { + case 0: + INFO("select real float32 for processing"); + process_image(dwi, mask, noise, argument[1], extent, exp1); + break; + case 1: + INFO("select real float64 for processing"); + process_image(dwi, mask, noise, argument[1], extent, exp1); + break; + case 2: + INFO("select complex float32 for processing"); + process_image(dwi, mask, noise, argument[1], extent, exp1); + break; + case 3: + INFO("select complex float64 for processing"); + process_image(dwi, mask, noise, 
argument[1], extent, exp1); + break; } - DenoisingFunctor< Image > func (dwi_in, extent, mask, noise); - ThreadedLoop ("running MP-PCA denoising", dwi_in, 0, 3) - .run (func, dwi_in, dwi_out); + + } diff --git a/cmd/dwiextract.cpp b/cmd/dwiextract.cpp index b9f4810ea6..a65aacf724 100644 --- a/cmd/dwiextract.cpp +++ b/cmd/dwiextract.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "phase_encoding.h" @@ -34,6 +35,14 @@ void usage () SYNOPSIS = "Extract diffusion-weighted volumes, b=0 volumes, or certain shells from a DWI dataset"; + EXAMPLES + + Example ("Calculate the mean b=0 image from a 4D DWI series", + "dwiextract dwi.mif - -bzero | mrmath - mean mean_bzero.mif -axis 3", + "The dwiextract command extracts all volumes for which the b-value is " + "(approximately) zero; the resulting 4D image can then be provided to " + "the mrmath command to calculate the mean intensity across volumes " + "for each voxel."); + ARGUMENTS + Argument ("input", "the input DW image.").type_image_in () + Argument ("output", "the output image (diffusion-weighted volumes by default).").type_image_out (); @@ -50,14 +59,15 @@ void usage () + Stride::Options; } + + + + + void run() { - auto input_header = Header::open (argument[0]); - auto input_image = input_header.get_image(); - - Eigen::MatrixXd grad_unprocessed = DWI::get_DW_scheme (input_image); - Eigen::MatrixXd grad = grad_unprocessed; - DWI::validate_DW_scheme (grad, input_image); + auto input_image = Image::open (argument[0]); + auto grad = DWI::get_DW_scheme (input_image); // Want to support non-shell-like data if it's just a straight extraction // of all dwis or all bzeros i.e. 
don't initialise the Shells class @@ -89,7 +99,7 @@ void run() } auto opt = get_options ("pe"); - const auto pe_scheme = PhaseEncoding::get_scheme (input_header); + const auto pe_scheme = PhaseEncoding::get_scheme (input_image); if (opt.size()) { if (!pe_scheme.rows()) throw Exception ("Cannot filter volumes by phase-encoding: No such information present"); @@ -128,7 +138,7 @@ void run() Eigen::MatrixXd new_grad (volumes.size(), grad.cols()); for (size_t i = 0; i < volumes.size(); i++) - new_grad.row (i) = grad_unprocessed.row (volumes[i]); + new_grad.row (i) = grad.row (volumes[i]); DWI::set_DW_scheme (header, new_grad); if (pe_scheme.rows()) { diff --git a/cmd/dwinormalise.cpp b/cmd/dwinormalise.cpp deleted file mode 100644 index f22f54adbd..0000000000 --- a/cmd/dwinormalise.cpp +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ - * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * For more details, see http://www.mrtrix.org/ - */ - - -#include "command.h" -#include "image.h" -#include "algo/loop.h" -#include "dwi/gradient.h" -#include "dwi/shells.h" - - -#define DEFAULT_TARGET_INTENSITY 1000.0 - - -using namespace MR; -using namespace App; - -void usage () { - AUTHOR = "David Raffelt (david.raffelt@florey.edu.au)"; - -SYNOPSIS = "Intensity normalise the b=0 signal within a supplied white matter mask"; - -ARGUMENTS - + Argument ("input", - "the input DWI image containing volumes that are both diffusion weighted and b=0").type_image_in () - - + Argument ("mask", - "the input mask image used to normalise the intensity").type_image_in () - - + Argument ("output", - "the output DWI intensity normalised image").type_image_out (); - -OPTIONS - + Option ("intensity", "normalise the b=0 signal to the specified value (Default: " + str(DEFAULT_TARGET_INTENSITY, 1) + ")") - + Argument ("value").type_float() - - + Option ("percentile", "define the percentile of the mask intensties used for normalisation. 
" - "If this option is not supplied then the median value (50th percentile) will be " - "normalised to the desired intensity value.") - + Argument ("value").type_integer (0, 100) - - + DWI::GradImportOptions(); - -} - - - -void run () { - - auto input = Image::open (argument[0]); - auto mask = Image::open (argument[1]); - check_dimensions (input, mask, 0, 3); - - - auto grad = DWI::get_DW_scheme (input); - DWI::Shells grad_shells (grad); - - vector bzeros; - for (size_t s = 0; s < grad_shells.count(); ++s) { - if (grad_shells[s].is_bzero()) { - bzeros = grad_shells[s].get_volumes(); - } - } - - vector bzero_mask_values; - float intensity = get_option_value ("intensity", DEFAULT_TARGET_INTENSITY); - int percentile = get_option_value ("percentile", 50); - - for (auto i = Loop ("computing " + str(percentile) + "th percentile within mask", input, 0, 3) (input, mask); i; ++i) { - if (mask.value()) { - float mean_bzero = 0.0; - for (size_t v = 0; v < bzeros.size(); ++v) { - input.index(3) = bzeros[v]; - mean_bzero += input.value(); - } - mean_bzero /= (float)bzeros.size(); - bzero_mask_values.push_back(mean_bzero); - } - } - - std::sort (bzero_mask_values.rbegin(), bzero_mask_values.rend()); - float percentile_value = bzero_mask_values[round(float(bzero_mask_values.size()) * float(percentile) / 100.0)]; - float scale_factor = intensity / percentile_value; - - Header output_header (input); - output_header.keyval()["dwi_norm_scale_factor"] = str(scale_factor); - output_header.keyval()["dwi_norm_percentile"] = str(percentile); - auto output = Image::create (argument[2], output_header); - - for (auto i = Loop ("normalising image intensities", input) (input, output); i; ++i) - output.value() = input.value() * scale_factor; -} - diff --git a/cmd/fixel2peaks.cpp b/cmd/fixel2peaks.cpp new file mode 100644 index 0000000000..b6cea24c4b --- /dev/null +++ b/cmd/fixel2peaks.cpp @@ -0,0 +1,149 @@ +/* Copyright (c) 2008-2019 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. + * + * For more details, see http://www.mrtrix.org/. + */ + +#include "command.h" +#include "image.h" +#include "progressbar.h" + +#include "adapter/subset.h" +#include "algo/loop.h" + +#include "fixel/helpers.h" +#include "fixel/keys.h" +#include "fixel/loop.h" +#include "fixel/types.h" + +using namespace MR; +using namespace App; + +using Fixel::index_type; + + +void usage () +{ + + AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)"; + + SYNOPSIS = "Convert data in the fixel directory format into a 4D image of 3-vectors"; + + DESCRIPTION + + "If a fixel data file is provided as input, then the 3-vectors in the " + "output image will be scaled based on the data in that file. 
If the input " + "is instead the fixel directory, or the index or directions file, then " + "all output 3-vectors will possess unit norm."; + + ARGUMENTS + + Argument ("in", "the input fixel information").type_various () + + Argument ("out", "the output peaks image").type_image_out (); + + OPTIONS + + Option ("number", "maximum number of fixels in each voxel (default: based on input data)") + + Argument ("value").type_integer(1) + + Option ("nan", "fill excess peak data with NaNs rather than zeroes"); + +} + + +void run () +{ + Header index_header, directions_header, data_header; + try { + Header input_header = Header::open (argument[0]); + if (Fixel::is_index_image (input_header)) { + index_header = std::move (input_header); + directions_header = Fixel::find_directions_header (Path::dirname (argument[0])); + } else if (Fixel::is_directions_file (input_header)) { + index_header = Fixel::find_index_header (Path::dirname (argument[0])); + directions_header = std::move (input_header); + } else if (Fixel::is_data_file (input_header)) { + index_header = Fixel::find_index_header (Path::dirname (argument[0])); + directions_header = Fixel::find_directions_header (Path::dirname (argument[0])); + data_header = std::move (input_header); + Fixel::check_fixel_size (index_header, data_header); + } else { + throw Exception ("Input image not recognised as part of fixel format"); + } + } catch (Exception& e_asimage) { + try { + if (!Path::is_dir (argument[0])) + throw Exception ("Input path is not a directory"); + index_header = Fixel::find_index_header (argument[0]); + directions_header = Fixel::find_directions_header (argument[0]); + } catch (Exception& e_asdir) { + Exception e ("Could not locate fixel data based on input string \"" + argument[0] + "\""); + e.push_back ("Error when interpreting as image: "); + for (size_t i = 0; i != e_asimage.num(); ++i) + e.push_back (" " + e_asimage[i]); + e.push_back ("Error when interpreting as fixel directory: "); + for (size_t i = 0; i != e_asdir.num(); ++i) + e.push_back (" " + e_asdir[i]); + throw e; + } + } + + Image index_image (index_header.get_image()); + Image directions_image (directions_header.get_image()); + Image data_image; + if (data_header.valid()) + data_image = data_header.get_image(); + + auto opt = get_options ("number"); + index_type max_fixel_count = 0; + if (opt.size()) { + max_fixel_count = opt[0][0]; + } else { + for (auto l = Loop (index_image, 0, 3) (index_image); l; ++l) + max_fixel_count = std::max (max_fixel_count, index_type (index_image.value())); + INFO ("Maximum number of fixels in any given voxel: " + str(max_fixel_count)); + } + + Header out_header (index_header); + out_header.datatype() = DataType::Float32; + out_header.datatype().set_byte_order_native(); + out_header.size(3) = 3 * max_fixel_count; + out_header.name() = std::string (argument[1]); + Image out_image (Image::create (argument[1], out_header)); + + const float fill = get_options ("nan").size() ? 
NaN : 0.0f; + + if (data_image.valid()) { + for (auto l = Loop ("converting fixel data file to peaks image", index_image, 0, 3) (index_image, out_image); l; ++l) { + out_image.index (3) = 0; + for (auto f = Fixel::Loop (index_image) (directions_image, data_image); f && out_image.index(3) < out_image.size(3); ++f) { + for (size_t axis = 0; axis != 3; ++axis) { + directions_image.index(1) = axis; + out_image.value() = data_image.value() * directions_image.value(); + ++out_image.index(3); + } + } + for (; out_image.index(3) != out_image.size (3); ++out_image.index(3)) + out_image.value() = fill; + } + } else { + for (auto l = Loop ("converting fixels to peaks image", index_image, 0, 3) (index_image, out_image); l; ++l) { + out_image.index (3) = 0; + for (auto f = Fixel::Loop (index_image) (directions_image); f && out_image.index(3) < out_image.size(3); ++f) { + for (directions_image.index(1) = 0; directions_image.index(1) != 3; ++directions_image.index(1)) { + out_image.value() = directions_image.value(); + ++out_image.index(3); + } + } + for (; out_image.index(3) != out_image.size (3); ++out_image.index(3)) + out_image.value() = fill; + } + } +} + diff --git a/cmd/fixel2sh.cpp b/cmd/fixel2sh.cpp index 7f950aeca2..cea64f132a 100644 --- a/cmd/fixel2sh.cpp +++ b/cmd/fixel2sh.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "progressbar.h" @@ -24,10 +25,12 @@ #include "fixel/helpers.h" #include "fixel/keys.h" #include "fixel/loop.h" +#include "fixel/types.h" using namespace MR; using namespace App; +using Fixel::index_type; void usage () @@ -40,7 +43,9 @@ void usage () DESCRIPTION + "This command generates spherical harmonic data from fixels " "that can be visualised using the ODF tool in MRview. The output ODF lobes " - "are scaled according to the values in the input fixel image."; + "are scaled according to the values in the input fixel image." 
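For reference, the SH encoding convention appended just below via Math::SH::encoding_description stores real, even-order harmonics only, so the number of coefficient volumes in the output follows directly from lmax; a tiny self-contained helper (illustrative only, not the library's API):

  #include <cstddef>

  // number of real, even-order SH coefficients stored up to harmonic order lmax
  constexpr std::size_t num_sh_coefficients (std::size_t lmax) {
    return (lmax + 1) * (lmax + 2) / 2;
  }
  static_assert (num_sh_coefficients (8) == 45, "lmax = 8 stores 45 coefficients");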
+ + + Math::SH::encoding_description; ARGUMENTS + Argument ("fixel_in", "the input fixel data file.").type_image_in () @@ -57,7 +62,7 @@ void run () auto in_data_image = Fixel::open_fixel_data_file (argument[0]); Header in_index_header = Fixel::find_index_header (Fixel::get_fixel_directory (argument[0])); - auto in_index_image =in_index_header.get_image(); + auto in_index_image =in_index_header.get_image(); auto in_directions_image = Fixel::find_directions_header (Fixel::get_fixel_directory (argument[0])).get_image().with_direct_io(); size_t lmax = 8; diff --git a/cmd/fixel2tsf.cpp b/cmd/fixel2tsf.cpp index dc949e2b38..6b6338ec37 100644 --- a/cmd/fixel2tsf.cpp +++ b/cmd/fixel2tsf.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "algo/loop.h" @@ -20,17 +21,22 @@ #include "image.h" #include "fixel/helpers.h" #include "fixel/keys.h" +#include "fixel/types.h" #include "dwi/tractography/file.h" #include "dwi/tractography/scalar_file.h" +#include "dwi/tractography/streamline.h" -#include "dwi/tractography/mapping/mapper.h" #include "dwi/tractography/mapping/loader.h" +#include "dwi/tractography/mapping/mapper.h" + using namespace MR; using namespace App; +using Fixel::index_type; + #define DEFAULT_ANGULAR_THRESHOLD 45.0 @@ -71,7 +77,7 @@ void run () "therefore the input fixel data file must have dimension Nx1x1"); Header in_index_header = Fixel::find_index_header (Fixel::get_fixel_directory (argument[0])); - auto in_index_image = in_index_header.get_image(); + auto in_index_image = in_index_header.get_image(); auto in_directions_image = Fixel::find_directions_header (Fixel::get_fixel_directory (argument[0])).get_image().with_direct_io(); DWI::Tractography::Properties properties; @@ -92,14 +98,17 @@ void run () ProgressBar progress ("mapping fixel values to streamline points", num_tracks); DWI::Tractography::Streamline tck; + DWI::Tractography::TrackScalar scalars; - Transform transform (in_index_image); + const Transform transform (in_index_image); Eigen::Vector3 voxel_pos; while (reader (tck)) { SetVoxelDir dixels; mapper (tck, dixels); - vector scalars (tck.size(), 0.0f); + scalars.clear(); + scalars.set_index (tck.get_index()); + scalars.resize (tck.size(), 0.0f); for (size_t p = 0; p < tck.size(); ++p) { voxel_pos = transform.scanner2voxel * tck[p].cast (); for (SetVoxelDir::const_iterator d = dixels.begin(); d != dixels.end(); ++d) { @@ -111,9 +120,9 @@ void run () int32_t closest_fixel_index = -1; in_index_image.index(3) = 0; - uint32_t 
num_fixels_in_voxel = in_index_image.value(); + index_type num_fixels_in_voxel = in_index_image.value(); in_index_image.index(3) = 1; - uint32_t offset = in_index_image.value(); + index_type offset = in_index_image.value(); for (size_t fixel = 0; fixel < num_fixels_in_voxel; ++fixel) { in_directions_image.index(0) = offset + fixel; diff --git a/cmd/fixel2voxel.cpp b/cmd/fixel2voxel.cpp index fcd3b3fa3f..8d486554f9 100644 --- a/cmd/fixel2voxel.cpp +++ b/cmd/fixel2voxel.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "apply.h" #include "command.h" #include "progressbar.h" @@ -30,10 +31,13 @@ #include "fixel/helpers.h" #include "fixel/keys.h" #include "fixel/loop.h" +#include "fixel/types.h" using namespace MR; using namespace App; +using Fixel::index_type; + const char* operations[] = { "mean", @@ -48,9 +52,8 @@ const char* operations[] = { "sf", "dec_unit", "dec_scaled", - "split_data", - "split_dir", - NULL + "none", + nullptr }; @@ -66,8 +69,15 @@ void usage () + "- The number of fixels in each voxel: count" + "- Some measure of crossing-fibre organisation: complexity, sf ('single-fibre')" + "- A 4D directionally-encoded colour image: dec_unit, dec_scaled" - + "- A 4D scalar image of fixel values with one 3D volume per fixel: split_data" - + "- A 4D image of fixel directions, stored as three 3D volumes per fixel direction: split_dir"; + + "- A 4D image containing all fixel data values in each voxel unmodified: none" + + + "The -weighted option deals with the case where there is some per-fixel metric of interest " + "that you wish to collapse into a single scalar measure per voxel, but each fixel possesses " + "a different volume, and you wish for those fixels with greater volume to have a greater " + "influence on the calculation than fixels with lesser volume. 
For instance, when estimating " + "a voxel-based measure of mean axon diameter from per-fixel mean axon diameters, a fixel's " + "mean axon diameter should be weigthed by its relative volume within the voxel in the " + "calculation of that voxel mean."; REFERENCES + "* Reference for 'complexity' operation:\n" @@ -82,29 +92,29 @@ void usage () OPTIONS + Option ("number", "use only the largest N fixels in calculation of the voxel-wise statistic; " - "in the case of \"split_data\" and \"split_dir\", output only the largest N " - "fixels, padding where necessary.") + "in the case of operation \"none\", output only the largest N fixels in each voxel.") + Argument ("N").type_integer(1) - + Option ("weighted", "weight the contribution of each fixel to the per-voxel result according to its volume. " - "E.g. when estimating a voxel-based measure of mean axon diameter, a fixel's mean axon diameter " - "should be weigthed by its relative volume within the voxel. Note that AFD can be used as a psuedomeasure of fixel volume.") + + Option ("fill", "for \"none\" operation, specify the value to fill when number of fixels is fewer than the maximum (default: 0.0)") + + Argument ("value").type_float() + + + Option ("weighted", "weight the contribution of each fixel to the per-voxel result according to its volume.") + Argument ("fixel_in").type_image_in (); } -using FixelIndexType = Image; +using FixelIndexType = Image; using FixelDataType = Image; struct set_offset { NOMEMALIGN - FORCE_INLINE set_offset (uint32_t offset) : offset (offset) { } + FORCE_INLINE set_offset (index_type offset) : offset (offset) { } template FORCE_INLINE void operator() (DataType& data) { data.index(0) = offset; } - uint32_t offset; + index_type offset; }; struct inc_fixel { NOMEMALIGN @@ -115,18 +125,18 @@ struct inc_fixel { NOMEMALIGN struct LoopFixelsInVoxelWithMax { NOMEMALIGN - const size_t num_fixels; - const size_t max_fixels; - const uint32_t offset; + const index_type num_fixels; + const index_type max_fixels; + const index_type offset; template struct Run { NOMEMALIGN - const size_t num_fixels; - const size_t max_fixels; - const uint32_t offset; - uint32_t fixel_index; + const index_type num_fixels; + const index_type max_fixels; + const index_type offset; + index_type fixel_index; const std::tuple data; - FORCE_INLINE Run (const size_t num_fixels, const size_t max_fixels, const uint32_t offset, const std::tuple& data) : + FORCE_INLINE Run (const index_type num_fixels, const index_type max_fixels, const index_type offset, const std::tuple& data) : num_fixels (num_fixels), max_fixels (max_fixels), offset (offset), fixel_index (0), data (data) { apply (set_offset (offset), data); } @@ -134,7 +144,7 @@ struct LoopFixelsInVoxelWithMax { NOMEMALIGN FORCE_INLINE void operator++() { if (!padding()) apply (inc_fixel (), data); ++fixel_index; } FORCE_INLINE void operator++(int) { operator++(); } FORCE_INLINE bool padding() const { return (max_fixels && fixel_index >= num_fixels); } - FORCE_INLINE size_t count() const { return max_fixels ? max_fixels : num_fixels; } + FORCE_INLINE index_type count() const { return max_fixels ? 
max_fixels : num_fixels; } }; template @@ -147,7 +157,7 @@ struct LoopFixelsInVoxelWithMax { NOMEMALIGN class Base { NOMEMALIGN public: - Base (FixelDataType& data, const size_t max_fixels, const bool pad = false, const float pad_value = 0.0) : + Base (FixelDataType& data, const index_type max_fixels, const bool pad = false, const float pad_value = 0.0) : data (data), max_fixels (max_fixels), pad (pad), @@ -157,14 +167,14 @@ class Base FORCE_INLINE LoopFixelsInVoxelWithMax Loop (FixelIndexType& index) { index.index(3) = 0; - const size_t num_fixels = index.value(); + const index_type num_fixels = index.value(); index.index(3) = 1; - const uint32_t offset = index.value(); + const index_type offset = index.value(); return { num_fixels, max_fixels, offset }; } FixelDataType data; - const size_t max_fixels; + const index_type max_fixels; const bool pad; const float pad_value; @@ -174,7 +184,7 @@ class Base class Mean : protected Base { MEMALIGN (Mean) public: - Mean (FixelDataType& data, const size_t max_fixels, FixelDataType& vol) : + Mean (FixelDataType& data, const index_type max_fixels, FixelDataType& vol) : Base (data, max_fixels), vol (vol) {} @@ -208,7 +218,7 @@ class Mean : protected Base class Sum : protected Base { MEMALIGN (Sum) public: - Sum (FixelDataType& data, const size_t max_fixels, FixelDataType& vol) : + Sum (FixelDataType& data, const index_type max_fixels, FixelDataType& vol) : Base (data, max_fixels), vol (vol) {} @@ -234,23 +244,23 @@ class Sum : protected Base class Product : protected Base { MEMALIGN (Product) public: - Product (FixelDataType& data, const size_t max_fixels) : + Product (FixelDataType& data, const index_type max_fixels) : Base (data, max_fixels) { } void operator() (FixelIndexType& index, Image& out) { index.index(3) = 0; - size_t num_fixels = index.value(); + index_type num_fixels = index.value(); if (!num_fixels) { out.value() = 0.0; return; } index.index(3) = 1; - uint32_t offset = index.value(); + index_type offset = index.value(); data.index(0) = offset; out.value() = data.value(); num_fixels = max_fixels ? 
std::min (max_fixels, num_fixels) : num_fixels; - for (size_t f = 1; f != num_fixels; ++f) { + for (index_type f = 1; f != num_fixels; ++f) { data.index(0)++; out.value() *= data.value(); } @@ -261,7 +271,7 @@ class Product : protected Base class Min : protected Base { MEMALIGN (Min) public: - Min (FixelDataType& data, const size_t max_fixels) : + Min (FixelDataType& data, const index_type max_fixels) : Base (data, max_fixels) { } void operator() (FixelIndexType& index, Image& out) @@ -279,7 +289,7 @@ class Min : protected Base class Max : protected Base { MEMALIGN (Max) public: - Max (FixelDataType& data, const size_t max_fixels) : + Max (FixelDataType& data, const index_type max_fixels) : Base (data, max_fixels) { } void operator() (FixelIndexType& index, Image& out) @@ -297,7 +307,7 @@ class Max : protected Base class AbsMax : protected Base { MEMALIGN (AbsMax) public: - AbsMax (FixelDataType& data, const size_t max_fixels) : + AbsMax (FixelDataType& data, const index_type max_fixels) : Base (data, max_fixels) { } void operator() (FixelIndexType& index, Image& out) @@ -315,7 +325,7 @@ class AbsMax : protected Base class MagMax : protected Base { MEMALIGN (MagMax) public: - MagMax (FixelDataType& data, const size_t num_fixels) : + MagMax (FixelDataType& data, const index_type num_fixels) : Base (data, num_fixels) { } void operator() (FixelIndexType& index, Image& out) @@ -333,13 +343,13 @@ class MagMax : protected Base class Complexity : protected Base { MEMALIGN (Complexity) public: - Complexity (FixelDataType& data, const size_t max_fixels) : + Complexity (FixelDataType& data, const index_type max_fixels) : Base (data, max_fixels) { } void operator() (FixelIndexType& index, Image& out) { index.index(3) = 0; - size_t num_fixels = index.value(); + index_type num_fixels = index.value(); num_fixels = max_fixels ? 
std::min (num_fixels, max_fixels) : num_fixels; if (num_fixels <= 1) { out.value() = 0.0; @@ -361,10 +371,10 @@ class Complexity : protected Base class SF : protected Base { MEMALIGN (SF) public: - SF (FixelDataType& data, const size_t max_fixels) : + SF (FixelDataType& data, const index_type max_fixels) : Base (data, max_fixels) { } - void operator() (Image& index, FixelDataType& out) + void operator() (Image& index, FixelDataType& out) { default_type max = 0.0; default_type sum = 0.0; @@ -382,11 +392,11 @@ class SF : protected Base class DEC_unit : protected Base { MEMALIGN (DEC_unit) public: - DEC_unit (FixelDataType& data, const size_t max_fixels, FixelDataType& vol, Image& dir) : + DEC_unit (FixelDataType& data, const index_type max_fixels, FixelDataType& vol, Image& dir) : Base (data, max_fixels), vol (vol), dir (dir) {} - void operator() (Image& index, Image& out) + void operator() (Image& index, Image& out) { Eigen::Vector3 sum_dec = {0.0, 0.0, 0.0}; if (vol.valid()) { @@ -414,7 +424,7 @@ class DEC_unit : protected Base class DEC_scaled : protected Base { MEMALIGN (DEC_scaled) public: - DEC_scaled (FixelDataType& data, const size_t max_fixels, FixelDataType& vol, Image& dir) : + DEC_scaled (FixelDataType& data, const index_type max_fixels, FixelDataType& vol, Image& dir) : Base (data, max_fixels), vol (vol), dir (dir) {} @@ -454,11 +464,11 @@ class DEC_scaled : protected Base }; -class SplitData : protected Base -{ MEMALIGN (SplitData) +class None : protected Base +{ MEMALIGN (None) public: - SplitData (FixelDataType& data, const size_t max_fixels) : - Base (data, max_fixels, true) { } + None (FixelDataType& data, const index_type max_fixels, const float fill_value) : + Base (data, max_fixels, true, fill_value) { } void operator() (FixelIndexType& index, Image& out) { @@ -470,33 +480,6 @@ class SplitData : protected Base }; -class SplitDir : protected Base -{ MEMALIGN (SplitDir) - public: - SplitDir (FixelDataType& dir, const size_t max_fixels) : - Base (dir, max_fixels, true, NAN) { } - - void operator() (FixelIndexType& index, Image& out) - { - out.index(3) = 0; - for (auto f = Base::Loop (index) (data); f; ++f) { - if (f.padding()) { - for (size_t axis = 0; axis < 3; ++axis) { - out.value() = pad_value; - ++out.index(3); - } - } else { - for (size_t axis = 0; axis < 3; ++axis) { - data.index(1) = axis; - out.value() = data.value(); - ++out.index(3); - } - } - } - } -}; - - @@ -515,7 +498,7 @@ void run () const int op = argument[1]; - const size_t max_fixels = get_option_value ("number", 0); + const index_type max_fixels = get_option_value ("number", 0); if (max_fixels && op == 7) throw Exception ("\"count\" statistic is meaningless if constraining the number of fixels per voxel using the -number option"); @@ -524,27 +507,29 @@ void run () H_out.datatype().set_byte_order_native(); H_out.keyval().erase (Fixel::n_fixels_key); if (op == 7) { // count + H_out.ndim() = 3; H_out.datatype() = DataType::UInt8; } else if (op == 10 || op == 11) { // dec H_out.ndim() = 4; H_out.size (3) = 3; - } else if (op == 12 || op == 13) { // split_* + } else if (op == 12) { // none H_out.ndim() = 4; if (max_fixels) { - // 3 volumes per fixel if performing split_dir - H_out.size(3) = (op == 13) ? 
(3 * max_fixels) : max_fixels; + H_out.size(3) = max_fixels; } else { - uint32_t max_count = 0; + index_type max_count = 0; for (auto l = Loop ("determining largest fixel count", in_index_image, 0, 3) (in_index_image); l; ++l) - max_count = std::max (max_count, (uint32_t)in_index_image.value()); + max_count = std::max (max_count, (index_type)in_index_image.value()); if (max_count == 0) throw Exception ("fixel image is empty"); // 3 volumes per fixel if performing split_dir - H_out.size(3) = (op == 13) ? (3 * max_count) : max_count; + H_out.size(3) = max_count; } + } else { + H_out.ndim() = 3; } - if (op == 10 || op == 11 || op == 13) // dec or split_dir + if (op == 10 || op == 11) // dec in_directions = Fixel::find_directions_header ( Fixel::get_fixel_directory (in_data.name())).get_image().with_direct_io(); @@ -556,11 +541,21 @@ void run () } if (op == 2 || op == 3 || op == 4 || op == 5 || op == 6 || - op == 7 || op == 8 || op == 9 || op == 12 || op == 13) { + op == 7 || op == 8 || op == 9 || op == 12) { if (in_vol.valid()) WARN ("Option -weighted has no meaningful interpretation for the operation specified; ignoring"); } + opt = get_options ("fill"); + float fill_value = 0.0; + if (opt.size()) { + if (op == 12) { + fill_value = opt[0][0]; + } else { + WARN ("Option -fill ignored; only applicable to \"none\" operation"); + } + } + auto out = Image::create (argument[2], H_out); auto loop = ThreadedLoop ("converting sparse fixel data to scalar image", in_index_image, 0, 3); @@ -573,16 +568,14 @@ void run () case 4: loop.run (Max (in_data, max_fixels), in_index_image, out); break; case 5: loop.run (AbsMax (in_data, max_fixels), in_index_image, out); break; case 6: loop.run (MagMax (in_data, max_fixels), in_index_image, out); break; - case 7: loop.run ([](Image& index, Image& out) { // count + case 7: loop.run ([](Image& index, Image& out) { // count out.value() = index.value(); }, in_index_image, out); break; case 8: loop.run (Complexity (in_data, max_fixels), in_index_image, out); break; case 9: loop.run (SF (in_data, max_fixels), in_index_image, out); break; case 10: loop.run (DEC_unit (in_data, max_fixels, in_vol, in_directions), in_index_image, out); break; case 11: loop.run (DEC_scaled (in_data, max_fixels, in_vol, in_directions), in_index_image, out); break; - case 12: loop.run (SplitData (in_data, max_fixels), in_index_image, out); break; - case 13: loop.run (SplitDir (in_directions, max_fixels), in_index_image, out); break; + case 12: loop.run (::None (in_data, max_fixels, fill_value), in_index_image, out); break; } } - diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 3ef898a091..d6dd45147a 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -1,54 +1,59 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" +#include "image.h" #include "progressbar.h" #include "thread_queue.h" -#include "algo/loop.h" #include "transform.h" -#include "image.h" +#include "algo/loop.h" #include "fixel/helpers.h" +#include "fixel/index_remapper.h" #include "fixel/keys.h" #include "fixel/loop.h" +#include "fixel/types.h" +#include "fixel/filter/smooth.h" +#include "math/stats/fwe.h" #include "math/stats/glm.h" -#include "math/stats/permutation.h" +#include "math/stats/import.h" +#include "math/stats/shuffle.h" #include "math/stats/typedefs.h" #include "stats/cfe.h" #include "stats/enhance.h" #include "stats/permtest.h" -#include "dwi/tractography/file.h" -#include "dwi/tractography/mapping/mapper.h" -#include "dwi/tractography/mapping/loader.h" -#include "dwi/tractography/mapping/writer.h" using namespace MR; using namespace App; -using namespace MR::DWI::Tractography::Mapping; -using namespace MR::Math::Stats; -using Stats::CFE::direction_type; -using Stats::CFE::connectivity_value_type; -using Stats::CFE::index_type; + +using Fixel::index_type; +using Math::Stats::matrix_type; +using Math::Stats::value_type; +using Math::Stats::vector_type; +using Stats::PermTest::count_matrix_type; + +#define DEFAULT_ANGLE_THRESHOLD 45.0 +#define DEFAULT_CONNECTIVITY_THRESHOLD 0.01 +#define DEFAULT_SMOOTHING_FWHM 10.0 #define DEFAULT_CFE_DH 0.1 #define DEFAULT_CFE_E 2.0 #define DEFAULT_CFE_H 3.0 #define DEFAULT_CFE_C 0.5 -#define DEFAULT_ANGLE_THRESHOLD 45.0 -#define DEFAULT_CONNECTIVITY_THRESHOLD 0.01 -#define DEFAULT_SMOOTHING_STD 10.0 +#define DEFAULT_EMPIRICAL_SKEW 1.0 // TODO Update from experience void usage () { @@ -57,21 +62,34 @@ void usage () SYNOPSIS = "Fixel-based analysis using connectivity-based fixel enhancement and non-parametric permutation testing"; DESCRIPTION + + "Unlike previous versions of this command, where a whole-brain tractogram file would be provided as input " + "in order to generate the fixel-fixel connectivity matrix and smooth fixel data, this version expects to be " + "provided with the directory path to a pre-calculated fixel-fixel connectivity matrix (likely generated using " + "the MRtrix3 command fixelconnectivity), and for the input fixel data to have already been smoothed (likely " + "using the MRtrix3 command fixelfilter)." + + "Note that if the -mask option is used, the output fixel directory will still contain the same set of fixels as that " "present in the input fixel template, in order to retain fixel correspondence. However a consequence of this is that " "all fixels in the template will be initialy visible when the output fixel directory is loaded in mrview. Those fixels " "outside the processing mask will immediately disappear from view as soon as any data-file-based fixel colouring or " - "thresholding is applied."; + "thresholding is applied." + + + Math::Stats::GLM::column_ones_description; REFERENCES - + "Raffelt, D.; Smith, RE.; Ridgway, GR.; Tournier, JD.; Vaughan, DN.; Rose, S.; Henderson, R.; Connelly, A." 
// Internal - "Connectivity-based fixel enhancement: Whole-brain statistical analysis of diffusion MRI measures in the presence of crossing fibres. \n" - "Neuroimage, 2015, 15(117):40-55\n" + + "Raffelt, D.; Smith, RE.; Ridgway, GR.; Tournier, JD.; Vaughan, DN.; Rose, S.; Henderson, R.; Connelly, A. " // Internal + "Connectivity-based fixel enhancement: Whole-brain statistical analysis of diffusion MRI measures in the presence of crossing fibres." + "Neuroimage, 2015, 15(117):40-55" + + + "* If not using the -cfe_legacy option: \n" + "Smith, RE.; Dimond, D; Vaughan, D.; Parker, D.; Dhollander, T.; Jackson, G.; Connelly, A. " + "Intrinsic non-stationarity correction for Fixel-Based Analysis. " + "In Proc OHBM 2019 M789" + "* If using the -nonstationary option: \n" - "Salimi-Khorshidi, G. Smith, S.M. Nichols, T.E. \n" - "Adjusting the effect of nonstationarity in cluster-based and TFCE inference. \n" - "NeuroImage, 2011, 54(3), 2006-19\n" ; + "Salimi-Khorshidi, G. Smith, S.M. Nichols, T.E. " + "Adjusting the effect of nonstationarity in cluster-based and TFCE inference. " + "NeuroImage, 2011, 54(3), 2006-19"; ARGUMENTS + Argument ("in_fixel_directory", "the fixel directory containing the data files for each subject (after obtaining fixel correspondence").type_directory_in() @@ -79,18 +97,23 @@ void usage () + Argument ("subjects", "a text file listing the subject identifiers (one per line). This should correspond with the filenames " "in the fixel directory (including the file extension), and be listed in the same order as the rows of the design matrix.").type_image_in () - + Argument ("design", "the design matrix. Note that a column of 1's will need to be added for correlations.").type_file_in () + + Argument ("design", "the design matrix").type_file_in () - + Argument ("contrast", "the contrast vector, specified as a single row of weights").type_file_in () + + Argument ("contrast", "the contrast matrix, specified as rows of weights").type_file_in () - + Argument ("tracks", "the tracks used to determine fixel-fixel connectivity").type_tracks_in () + // .type_various() rather than .type_directory_in() to catch people trying to + // pass a track file, and give a more informative error message + + Argument ("connectivity", "the fixel-fixel connectivity matrix").type_various () + Argument ("out_fixel_directory", "the output directory where results will be saved. Will be created if it does not exist").type_text(); OPTIONS - + Stats::PermTest::Options (true) + + Option ("mask", "provide a fixel data file containing a mask of those fixels to be used during processing") + + Argument ("file").type_image_in() + + + Math::Stats::shuffle_options (true, DEFAULT_EMPIRICAL_SKEW) + OptionGroup ("Parameters for the Connectivity-based Fixel Enhancement algorithm") @@ -106,22 +129,9 @@ void usage () + Option ("cfe_c", "cfe connectivity exponent (default: " + str(DEFAULT_CFE_C, 2) + ")") + Argument ("value").type_float (0.0, 100.0) - + OptionGroup ("Additional options for fixelcfestats") - - + Option ("negative", "automatically test the negative (opposite) contrast. 
By computing the opposite contrast simultaneously " - "the computation time is reduced.") - - + Option ("smooth", "smooth the fixel value along the fibre tracts using a Gaussian kernel with the supplied FWHM (default: " + str(DEFAULT_SMOOTHING_STD, 2) + "mm)") - + Argument ("FWHM").type_float (0.0, 200.0) - - + Option ("connectivity", "a threshold to define the required fraction of shared connections to be included in the neighbourhood (default: " + str(DEFAULT_CONNECTIVITY_THRESHOLD, 2) + ")") - + Argument ("threshold").type_float (0.0, 1.0) + + Option ("cfe_legacy", "use the legacy (non-normalised) form of the cfe equation") - + Option ("angle", "the max angle threshold for assigning streamline tangents to fixels (Default: " + str(DEFAULT_ANGLE_THRESHOLD, 2) + " degrees)") - + Argument ("value").type_float (0.0, 90.0) - - + Option ("mask", "provide a fixel data file containing a mask of those fixels to be used during processing") - + Argument ("file").type_image_in(); + + Math::Stats::GLM::glm_options ("fixel"); } @@ -130,409 +140,349 @@ void usage () template void write_fixel_output (const std::string& filename, const VectorType& data, - const vector& fixel2row, + Image& mask, const Header& header) { - assert (size_t(header.size(0)) == fixel2row.size()); auto output = Image::create (filename, header); - for (size_t i = 0; i != fixel2row.size(); ++i) { - output.index(0) = i; - output.value() = (fixel2row[i] >= 0) ? data[fixel2row[i]] : NaN; - } + for (auto l = Loop(0) (output, mask); l; ++l) + output.value() = mask.value() ? data[output.index(0)] : NaN; } -void run() { +// Define data importer class that will obtain fixel data for a +// specific subject based on the string path to the image file for +// that subject +class SubjectFixelImport : public Math::Stats::SubjectDataImportBase +{ MEMALIGN(SubjectFixelImport) + public: + SubjectFixelImport (const std::string& path) : + Math::Stats::SubjectDataImportBase (path), + H (Header::open (path)), + data (H.get_image()) + { + for (size_t axis = 1; axis < data.ndim(); ++axis) { + if (data.size(axis) > 1) + throw Exception ("Image file \"" + path + "\" does not contain fixel data (wrong dimensions)"); + } + } + + void operator() (matrix_type::RowXpr row) const override + { + Image temp (data); // For thread-safety + for (temp.index(0) = 0; temp.index(0) != temp.size(0); ++temp.index(0)) + row [temp.index(0)] = temp.value(); + } + + default_type operator[] (const size_t index) const override + { + Image temp (data); // For thread-safety + temp.index(0) = index; + assert (!is_out_of_bounds (temp)); + return default_type(temp.value()); + } + + size_t size() const override { return data.size(0); } + + const Header& header() const { return H; } + + + + private: + Header H; + Image data; // May be mapped input file, or scratch smoothed data + +}; + + + + +void run() +{ + if (Path::has_suffix (argument[4], ".tck")) + throw Exception ("This version of fixelcfestats requires as input not a track file, but a " + "pre-calculated fixel-fixel connectivity matrix; in addition, input fixel " + "data must be pre-smoothed. Please check command / pipeline documentation " + "specific to this software version."); - auto opt = get_options ("negative"); - bool compute_negative_contrast = opt.size() ? 
true : false; const value_type cfe_dh = get_option_value ("cfe_dh", DEFAULT_CFE_DH); const value_type cfe_h = get_option_value ("cfe_h", DEFAULT_CFE_H); const value_type cfe_e = get_option_value ("cfe_e", DEFAULT_CFE_E); const value_type cfe_c = get_option_value ("cfe_c", DEFAULT_CFE_C); - int num_perms = get_option_value ("nperms", DEFAULT_NUMBER_PERMUTATIONS); - const value_type smooth_std_dev = get_option_value ("smooth", DEFAULT_SMOOTHING_STD) / 2.3548; - const value_type connectivity_threshold = get_option_value ("connectivity", DEFAULT_CONNECTIVITY_THRESHOLD); - const bool do_nonstationary_adjustment = get_options ("nonstationary").size(); - int nperms_nonstationary = get_option_value ("nperms_nonstationary", DEFAULT_NUMBER_PERMUTATIONS_NONSTATIONARITY); - const value_type angular_threshold = get_option_value ("angle", DEFAULT_ANGLE_THRESHOLD); + const bool cfe_legacy = get_options ("cfe_legacy").size(); + const bool do_nonstationarity_adjustment = get_options ("nonstationarity").size(); + const default_type empirical_skew = get_option_value ("skew_nonstationarity", DEFAULT_EMPIRICAL_SKEW); const std::string input_fixel_directory = argument[0]; Header index_header = Fixel::find_index_header (input_fixel_directory); auto index_image = index_header.get_image(); const index_type num_fixels = Fixel::get_number_of_fixels (index_header); - CONSOLE ("number of fixels: " + str(num_fixels)); + CONSOLE ("Number of fixels in template: " + str(num_fixels)); Image mask; - index_type mask_fixels; - // Lookup table that maps from input fixel index to row number - // Note that if a fixel is masked out, it will have a value of -1 - // in this array; hence require a signed integer type - vector fixel2row (num_fixels); - opt = get_options ("mask"); + auto opt = get_options ("mask"); + index_type mask_fixels = 0; if (opt.size()) { mask = Image::open (opt[0][0]); Fixel::check_data_file (mask); if (!Fixel::fixels_match (index_header, mask)) throw Exception ("Mask image provided using -mask option does not match fixel template"); - mask_fixels = 0; - for (mask.index(0) = 0; mask.index(0) != num_fixels; ++mask.index(0)) - fixel2row[mask.index(0)] = mask.value() ? 
mask_fixels++ : -1; - CONSOLE ("Fixel mask contains " + str(mask_fixels) + " fixels"); + for (auto l = Loop(0) (mask); l; ++l) { + if (mask.value()) + ++mask_fixels; + } + CONSOLE ("Number of fixels in mask: " + str(mask_fixels)); } else { - Header data_header; - data_header.ndim() = 3; - data_header.size(0) = num_fixels; - data_header.size(1) = 1; - data_header.size(2) = 1; - data_header.spacing(0) = data_header.spacing(1) = data_header.spacing(2) = 1.0; - data_header.stride(0) = 1; data_header.stride(1) = 2; data_header.stride(2) = 3; - data_header.transform().setIdentity(); - mask = Image::scratch (data_header, "scratch fixel mask"); - for (index_type f = 0; f != num_fixels; ++f) { - mask.index(0) = f; + Header fixel_mask_header = Fixel::data_header_from_index (index_header); + fixel_mask_header.datatype() = DataType::Bit; + mask = Image::scratch (fixel_mask_header, "true-filled scratch fixel mask"); + for (auto l = Loop(0) (mask); l; ++l) mask.value() = true; - } mask_fixels = num_fixels; - for (index_type f = 0; f != num_fixels; ++f) - fixel2row[f] = f; } - vector positions (num_fixels); - vector directions (num_fixels); - - const std::string output_fixel_directory = argument[5]; - Fixel::copy_index_and_directions_file (input_fixel_directory, output_fixel_directory); - - { - auto directions_data = Fixel::find_directions_header (input_fixel_directory).get_image().with_direct_io ({+2,+1}); - // Load template fixel directions - Transform image_transform (index_image); - for (auto i = Loop ("loading template fixel directions and positions", index_image, 0, 3)(index_image); i; ++i) { - Eigen::Vector3 vox ((default_type)index_image.index(0), (default_type)index_image.index(1), (default_type)index_image.index(2)); - vox = image_transform.voxel2scanner * vox; - index_image.index(3) = 1; - const index_type offset = index_image.value(); - size_t fixel_index = 0; - for (auto f = Fixel::Loop (index_image) (directions_data); f; ++f, ++fixel_index) { - directions[offset + fixel_index] = directions_data.row(1); - positions[offset + fixel_index] = vox; - } - } - } - // Read identifiers and check files exist - vector identifiers; - Header header; - { - ProgressBar progress ("validating input files"); - std::ifstream ifs (argument[1].c_str()); - std::string temp; - while (getline (ifs, temp)) { - temp = strip (temp); - if (temp.empty()) - continue; - const std::string filename = Path::join (input_fixel_directory, temp); - if (!MR::Path::exists (filename)) - throw Exception ("input fixel image not found: " + filename); - header = Header::open (filename); - Fixel::fixels_match (index_header, header); - identifiers.push_back (filename); - progress++; - } + // Read file names and check files exist + // Preference for finding files relative to input template fixel directory + Math::Stats::CohortDataImport importer; + importer.initialise (argument[1], input_fixel_directory); + for (size_t i = 0; i != importer.size(); ++i) { + if (!Fixel::fixels_match (index_header, dynamic_cast(importer[i].get())->header())) + throw Exception ("Fixel data file \"" + importer[i]->name() + "\" does not match template fixel image"); } + CONSOLE ("Number of inputs: " + str(importer.size())); // Load design matrix: const matrix_type design = load_matrix (argument[2]); - if (design.rows() != (ssize_t)identifiers.size()) - throw Exception ("number of input files does not match number of rows in design matrix"); - - // Load permutations file if supplied - opt = get_options("permutations"); - vector > permutations; - if (opt.size()) 
{ - permutations = Math::Stats::Permutation::load_permutations_file (opt[0][0]); - num_perms = permutations.size(); - if (permutations[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); - } - - // Load non-stationary correction permutations file if supplied - opt = get_options("permutations_nonstationary"); - vector > permutations_nonstationary; - if (opt.size()) { - permutations_nonstationary = Math::Stats::Permutation::load_permutations_file (opt[0][0]); - nperms_nonstationary = permutations_nonstationary.size(); - if (permutations_nonstationary[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the nonstationary permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); - } - - // Load contrast matrix: - const matrix_type contrast = load_matrix (argument[3]); - - if (contrast.cols() != design.cols()) - throw Exception ("the number of columns per contrast does not equal the number of columns in the design matrix"); - if (contrast.rows() > 1) - throw Exception ("only a single contrast vector (defined as a row) is currently supported"); - - // Compute fixel-fixel connectivity - Stats::CFE::init_connectivity_matrix_type connectivity_matrix (num_fixels); - vector fixel_TDI (num_fixels, 0); - const std::string track_filename = argument[4]; - DWI::Tractography::Properties properties; - DWI::Tractography::Reader track_file (track_filename, properties); - // Read in tracts, and compute whole-brain fixel-fixel connectivity - const size_t num_tracks = properties["count"].empty() ? 0 : to (properties["count"]); - if (!num_tracks) - throw Exception ("no tracks found in input file"); - if (num_tracks < 1000000) - WARN ("more than 1 million tracks should be used to ensure robust fixel-fixel connectivity"); - { - DWI::Tractography::Mapping::TrackLoader loader (track_file, num_tracks, "pre-computing fixel-fixel connectivity"); - DWI::Tractography::Mapping::TrackMapperBase mapper (index_image); - mapper.set_upsample_ratio (DWI::Tractography::Mapping::determine_upsample_ratio (index_header, properties, 0.333f)); - mapper.set_use_precise_mapping (true); - Stats::CFE::TrackProcessor tract_processor (index_image, directions, mask, fixel_TDI, connectivity_matrix, angular_threshold); - Thread::run_queue ( - loader, - Thread::batch (DWI::Tractography::Streamline()), - mapper, - Thread::batch (DWI::Tractography::Mapping::SetVoxelDir()), - tract_processor); - } - track_file.close(); - - // Normalise connectivity matrix, threshold, and put in a more efficient format - Stats::CFE::norm_connectivity_matrix_type norm_connectivity_matrix (mask_fixels); - // Also pre-compute fixel-fixel weights for smoothing. 
- Stats::CFE::norm_connectivity_matrix_type smoothing_weights (mask_fixels); - bool do_smoothing = false; - - const float gaussian_const2 = 2.0 * smooth_std_dev * smooth_std_dev; - float gaussian_const1 = 1.0; - if (smooth_std_dev > 0.0) { - do_smoothing = true; - gaussian_const1 = 1.0 / (smooth_std_dev * std::sqrt (2.0 * Math::pi)); - } - - { - ProgressBar progress ("normalising and thresholding fixel-fixel connectivity matrix", num_fixels); - for (index_type fixel = 0; fixel < num_fixels; ++fixel) { - mask.index(0) = fixel; - const int32_t row = fixel2row[fixel]; - - if (mask.value()) { - - // Here, the connectivity matrix needs to be modified to reflect the - // fact that fixel indices in the template fixel image may not - // correspond to rows in the statistical analysis - connectivity_value_type sum_weights = 0.0; - - for (auto& it : connectivity_matrix[fixel]) { -#ifndef NDEBUG - // Even if this fixel is within the mask, it should still not - // connect to any fixel that is outside the mask - mask.index(0) = it.first; - assert (mask.value()); -#endif - const connectivity_value_type connectivity = it.second.value / connectivity_value_type (fixel_TDI[fixel]); - if (connectivity >= connectivity_threshold) { - if (do_smoothing) { - const value_type distance = std::sqrt (Math::pow2 (positions[fixel][0] - positions[it.first][0]) + - Math::pow2 (positions[fixel][1] - positions[it.first][1]) + - Math::pow2 (positions[fixel][2] - positions[it.first][2])); - const connectivity_value_type smoothing_weight = connectivity * gaussian_const1 * std::exp (-Math::pow2 (distance) / gaussian_const2); - if (smoothing_weight >= connectivity_threshold) { - smoothing_weights[row].push_back (Stats::CFE::NormMatrixElement (fixel2row[it.first], smoothing_weight)); - sum_weights += smoothing_weight; - } - } - // Here we pre-exponentiate each connectivity value by C - norm_connectivity_matrix[row].push_back (Stats::CFE::NormMatrixElement (fixel2row[it.first], std::pow (connectivity, cfe_c))); + if (design.rows() != (ssize_t)importer.size()) + throw Exception ("Number of input files does not match number of rows in design matrix"); + + // Before validating the contrast matrix, we first need to see if there are any + // additional design matrix columns coming from fixel-wise subject data + vector extra_columns; + bool nans_in_columns = false; + opt = get_options ("column"); + for (size_t i = 0; i != opt.size(); ++i) { + extra_columns.push_back (Math::Stats::CohortDataImport()); + extra_columns[i].initialise (opt[i][0]); + // Check for non-finite values in mask fixels only + // Can't use generic allFinite() function; need to populate matrix data + if (!nans_in_columns) { + matrix_type column_data (importer.size(), num_fixels); + for (size_t j = 0; j != importer.size(); ++j) + (*extra_columns[i][j]) (column_data.row (j)); + if (mask_fixels == num_fixels) { + nans_in_columns = !column_data.allFinite(); + } else { + for (auto l = Loop(0) (mask); l; ++l) { + if (mask.value() && !column_data.col (mask.index(0)).allFinite()) { + nans_in_columns = true; + break; } } - - // Make sure the fixel is fully connected to itself - norm_connectivity_matrix[row].push_back (Stats::CFE::NormMatrixElement (uint32_t(row), connectivity_value_type(1.0))); - smoothing_weights[row].push_back (Stats::CFE::NormMatrixElement (uint32_t(row), connectivity_value_type(gaussian_const1))); - sum_weights += connectivity_value_type(gaussian_const1); - - // Normalise smoothing weights - const connectivity_value_type norm_factor = 
connectivity_value_type(1.0) / sum_weights; - for (auto i : smoothing_weights[row]) - i.normalise (norm_factor); - - // Force deallocation of memory used for this fixel in the original matrix - std::map().swap (connectivity_matrix[fixel]); - - } else { - - // If fixel is not in the mask, tract_processor should never assign - // any connections to it - assert (connectivity_matrix[fixel].empty()); - } - - progress++; } } + const ssize_t num_factors = design.cols() + extra_columns.size(); + CONSOLE ("Number of factors: " + str(num_factors)); + if (extra_columns.size()) { + CONSOLE ("Number of element-wise design matrix columns: " + str(extra_columns.size())); + if (nans_in_columns) + CONSOLE ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from fixel-wise design matrices accordingly"); + } + Math::Stats::GLM::check_design (design, extra_columns.size()); + + // Load variance groups + auto variance_groups = Math::Stats::GLM::load_variance_groups (design.rows()); + const size_t num_vgs = variance_groups.size() ? variance_groups.maxCoeff()+1 : 1; + if (num_vgs > 1) + CONSOLE ("Number of variance groups: " + str(num_vgs)); + + // Load hypotheses + const vector hypotheses = Math::Stats::GLM::load_hypotheses (argument[3]); + const size_t num_hypotheses = hypotheses.size(); + if (hypotheses[0].cols() != num_factors) + throw Exception ("The number of columns in the contrast matrix (" + str(hypotheses[0].cols()) + ")" + + (extra_columns.size() ? " (in addition to the " + str(extra_columns.size()) + " uses of -column)" : "") + + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")"); + CONSOLE ("Number of hypotheses: " + str(num_hypotheses)); + + // Load fixel-fixel connectivity matrix + Fixel::Matrix::Reader matrix (argument[4], mask); - // The connectivity matrix is now in vector rather than matrix form; - // throw out the structure holding the original data - // (Note however that all entries in the original structure should - // have been deleted during the prior loop) - Stats::CFE::init_connectivity_matrix_type().swap (connectivity_matrix); + const std::string output_fixel_directory = argument[5]; + Fixel::copy_index_and_directions_file (input_fixel_directory, output_fixel_directory); + // Do we still want to check whether or not there are any disconnected fixels? + // With the current derivation, disconnected fixels will not possess any self-connectivity, + // and therefore will receive a value of 0 according to the CFE expression. So these + // should actually not interfere at all with the intrinsic normalisation / empirical + // non-stationarity correction. + // It may nevertheless be informative to know whether there are fixels that are included + // in the mask but don't have any connectivity; warn the user that these will be + // zeroed by the enhancement process + index_type num_unconnected_fixels = 0; + for (index_type f = 0; f != num_fixels; ++f) { + mask.index (0) = f; + if (mask.value() && !matrix.size (f)) + ++num_unconnected_fixels; + } + if (num_unconnected_fixels) { + WARN ("A total of " + str(num_unconnected_fixels) + " fixels " + + (mask_fixels == num_fixels ? 
"" : "in the provided mask ") + + "do not possess any streamlines-based connectivity; " + "these will not be enhanced by CFE, and hence cannot be " + "tested for statistical significance"); + } - Header output_header (header); - output_header.keyval()["num permutations"] = str(num_perms); - output_header.keyval()["dh"] = str(cfe_dh); + Header output_header (dynamic_cast(importer[0].get())->header()); + output_header.keyval()["cfe_dh"] = str(cfe_dh); output_header.keyval()["cfe_e"] = str(cfe_e); output_header.keyval()["cfe_h"] = str(cfe_h); output_header.keyval()["cfe_c"] = str(cfe_c); - output_header.keyval()["angular threshold"] = str(angular_threshold); - output_header.keyval()["connectivity threshold"] = str(connectivity_threshold); - output_header.keyval()["smoothing FWHM"] = str(smooth_std_dev * 2.3548); + output_header.keyval()["cfe_legacy"] = str(cfe_legacy); - - // Load input data - matrix_type data (mask_fixels, identifiers.size()); - data.setZero(); + matrix_type data = matrix_type::Zero (importer.size(), num_fixels); { - ProgressBar progress (std::string ("loading input images") + (do_smoothing ? " and smoothing" : ""), identifiers.size()); - for (size_t subject = 0; subject < identifiers.size(); subject++) { - LogLevelLatch log_level (0); - - auto subject_data = Image::open (identifiers[subject]).with_direct_io(); - vector subject_data_vector (mask_fixels, 0.0); - for (auto i = Loop (index_image, 0, 3)(index_image); i; ++i) { - index_image.index(3) = 1; - uint32_t offset = index_image.value(); - uint32_t fixel_index = 0; - for (auto f = Fixel::Loop (index_image) (subject_data); f; ++f, ++fixel_index) { - if (!std::isfinite(static_cast(subject_data.value()))) - throw Exception ("subject data file " + identifiers[subject] + " contains non-finite value: " + str(subject_data.value())); - // Note that immediately on import, data are re-arranged according to fixel mask - const int32_t row = fixel2row[offset+fixel_index]; - if (row >= 0) - subject_data_vector[row] = subject_data.value(); - } - } - - // Smooth the data - if (do_smoothing) { - for (size_t fixel = 0; fixel < mask_fixels; ++fixel) { - value_type value = 0.0; - for (auto i : smoothing_weights[fixel]) - value += subject_data_vector[i.index()] * i.value(); - data (fixel, subject) = value; - } - } else { - data.col (subject) = Eigen::Map > (subject_data_vector.data(), mask_fixels); - } + ProgressBar progress (std::string ("Loading fixel data (no smoothing)"), importer.size()); + for (size_t subject = 0; subject != importer.size(); subject++) { + (*importer[subject]) (data.row (subject)); progress++; } } + // Detect non-finite values in mask fixels only; NaN-fill other fixels + bool nans_in_data = false; + for (auto l = Loop(0) (mask); l; ++l) { + if (mask.value()) { + if (!data.col (mask.index(0)).allFinite()) + nans_in_data = true; + } else { + data.col (mask.index (0)).fill (NaN); + } + } + if (nans_in_data) { + CONSOLE ("Non-finite values present in data; rows will be removed from fixel-wise design matrices accordingly"); + if (!extra_columns.size()) { + CONSOLE ("(Note that this will result in slower execution than if such values were not present)"); + } + } - // Free the memory occupied by the data smoothing filter; no longer required - Stats::CFE::norm_connectivity_matrix_type().swap (smoothing_weights); + // Only add contrast matrix row number to image outputs if there's more than one hypothesis + auto postfix = [&] (const size_t i) -> std::string { return (num_hypotheses > 1) ? 
("_" + hypotheses[i].name()) : ""; }; - if (!data.allFinite()) - throw Exception ("input data contains non-finite value(s)"); { - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation"); - auto temp = Math::Stats::GLM::solve_betas (data, design); + matrix_type betas (num_factors, num_fixels); + matrix_type abs_effect_size (num_fixels, num_hypotheses); + matrix_type std_effect_size (num_fixels, num_hypotheses); + matrix_type stdev (num_vgs, num_fixels); + vector_type cond (num_fixels); + + Math::Stats::GLM::all_stats (data, design, extra_columns, hypotheses, variance_groups, + cond, betas, abs_effect_size, std_effect_size, stdev); - for (ssize_t i = 0; i < contrast.cols(); ++i) { - write_fixel_output (Path::join (output_fixel_directory, "beta" + str(i) + ".mif"), temp.row(i), fixel2row, output_header); + ProgressBar progress ("Outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_hypotheses) + num_vgs + (nans_in_data || extra_columns.size() ? 1 : 0)); + + for (ssize_t i = 0; i != num_factors; ++i) { + write_fixel_output (Path::join (output_fixel_directory, "beta" + str(i) + ".mif"), betas.row(i), mask, output_header); + ++progress; + } + for (size_t i = 0; i != num_hypotheses; ++i) { + if (!hypotheses[i].is_F()) { + write_fixel_output (Path::join (output_fixel_directory, "abs_effect" + postfix(i) + ".mif"), abs_effect_size.col(i), mask, output_header); + ++progress; + if (num_vgs == 1) + write_fixel_output (Path::join (output_fixel_directory, "std_effect" + postfix(i) + ".mif"), std_effect_size.col(i), mask, output_header); + } else { + ++progress; + } ++progress; } - temp = Math::Stats::GLM::abs_effect_size (data, design, contrast); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "abs_effect.mif"), temp.row(0), fixel2row, output_header); ++progress; - temp = Math::Stats::GLM::std_effect_size (data, design, contrast); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "std_effect.mif"), temp.row(0), fixel2row, output_header); ++progress; - temp = Math::Stats::GLM::stdev (data, design); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), temp.row(0), fixel2row, output_header); + if (nans_in_data || extra_columns.size()) { + write_fixel_output (Path::join (output_fixel_directory, "cond.mif"), cond, mask, output_header); + ++progress; + } + if (num_vgs == 1) { + write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), stdev.row (0), mask, output_header); + } else { + for (size_t i = 0; i != num_vgs; ++i) { + write_fixel_output (Path::join (output_fixel_directory, "std_dev" + str(i) + ".mif"), stdev.row (i), mask, output_header); + ++progress; + } + } + } + + // Construct the class for performing the initial statistical tests + std::shared_ptr glm_test; + if (extra_columns.size() || nans_in_data) { + if (variance_groups.size()) + glm_test.reset (new Math::Stats::GLM::TestVariableHeteroscedastic (extra_columns, data, design, hypotheses, variance_groups, nans_in_data, nans_in_columns)); + else + glm_test.reset (new Math::Stats::GLM::TestVariableHomoscedastic (extra_columns, data, design, hypotheses, nans_in_data, nans_in_columns)); + } else { + if (variance_groups.size()) + glm_test.reset (new Math::Stats::GLM::TestFixedHeteroscedastic (data, design, hypotheses, variance_groups)); + else + glm_test.reset (new Math::Stats::GLM::TestFixedHomoscedastic (data, design, hypotheses)); } - Math::Stats::GLMTTest glm_ttest (data, design, contrast); 
- std::shared_ptr cfe_integrator; - cfe_integrator.reset (new Stats::CFE::Enhancer (norm_connectivity_matrix, cfe_dh, cfe_e, cfe_h)); - vector_type empirical_cfe_statistic; + // Construct the class for performing fixel-based statistical enhancement + std::shared_ptr cfe_integrator (new Stats::CFE (matrix, cfe_dh, cfe_e, cfe_h, cfe_c, !cfe_legacy)); // If performing non-stationarity adjustment we need to pre-compute the empirical CFE statistic - if (do_nonstationary_adjustment) { - empirical_cfe_statistic = vector_type::Zero (mask_fixels); - if (permutations_nonstationary.size()) { - Stats::PermTest::PermutationStack permutations (permutations_nonstationary, "precomputing empirical statistic for non-stationarity adjustment"); - Stats::PermTest::precompute_empirical_stat (glm_ttest, cfe_integrator, permutations, empirical_cfe_statistic); - } else { - Stats::PermTest::PermutationStack permutations (nperms_nonstationary, design.rows(), "precomputing empirical statistic for non-stationarity adjustment", false); - Stats::PermTest::precompute_empirical_stat (glm_ttest, cfe_integrator, permutations, empirical_cfe_statistic); - } - output_header.keyval()["nonstationary adjustment"] = str(true); - write_fixel_output (Path::join (output_fixel_directory, "cfe_empirical.mif"), empirical_cfe_statistic, fixel2row, output_header); + matrix_type empirical_cfe_statistic; + if (do_nonstationarity_adjustment) { + Stats::PermTest::precompute_empirical_stat (glm_test, cfe_integrator, empirical_skew, empirical_cfe_statistic); + output_header.keyval()["nonstationarity_adjustment"] = str(true); + for (size_t i = 0; i != num_hypotheses; ++i) + write_fixel_output (Path::join (output_fixel_directory, "cfe_empirical" + postfix(i) + ".mif"), empirical_cfe_statistic.col(i), mask, output_header); } else { - output_header.keyval()["nonstationary adjustment"] = str(false); + output_header.keyval()["nonstationarity_adjustment"] = str(false); } // Precompute default statistic and CFE statistic - vector_type cfe_output (mask_fixels); - std::shared_ptr cfe_output_neg; - vector_type tvalue_output (mask_fixels); - if (compute_negative_contrast) - cfe_output_neg.reset (new vector_type (mask_fixels)); - - Stats::PermTest::precompute_default_permutation (glm_ttest, cfe_integrator, empirical_cfe_statistic, cfe_output, cfe_output_neg, tvalue_output); - - write_fixel_output (Path::join (output_fixel_directory, "cfe.mif"), cfe_output, fixel2row, output_header); - write_fixel_output (Path::join (output_fixel_directory, "tvalue.mif"), tvalue_output, fixel2row, output_header); - if (compute_negative_contrast) - write_fixel_output (Path::join (output_fixel_directory, "cfe_neg.mif"), *cfe_output_neg, fixel2row, output_header); + matrix_type default_statistic, default_zstat, default_enhanced; + Stats::PermTest::precompute_default_permutation (glm_test, cfe_integrator, empirical_cfe_statistic, default_statistic, default_zstat, default_enhanced); + for (size_t i = 0; i != num_hypotheses; ++i) { + write_fixel_output (Path::join (output_fixel_directory, (hypotheses[i].is_F() ? 
std::string("F") : std::string("t")) + "value" + postfix(i) + ".mif"), default_statistic.col(i), mask, output_header); + write_fixel_output (Path::join (output_fixel_directory, "Zstat" + postfix(i) + ".mif"), default_zstat.col(i), mask, output_header); + write_fixel_output (Path::join (output_fixel_directory, "cfe" + postfix(i) + ".mif"), default_enhanced.col(i), mask, output_header); + } // Perform permutation testing if (!get_options ("notest").size()) { - vector_type perm_distribution (num_perms); - std::shared_ptr perm_distribution_neg; - vector_type uncorrected_pvalues (mask_fixels); - std::shared_ptr uncorrected_pvalues_neg; - - if (compute_negative_contrast) { - perm_distribution_neg.reset (new vector_type (num_perms)); - uncorrected_pvalues_neg.reset (new vector_type (mask_fixels)); + + const bool fwe_strong = get_option_value ("strong", false); + if (fwe_strong && num_hypotheses == 1) { + WARN("Option -strong has no effect when testing a single hypothesis only"); } - // FIXME fixelcfestats is hanging here for some reason... - // Even when no mask is supplied + matrix_type null_distribution, uncorrected_pvalues; + count_matrix_type null_contributions; + Stats::PermTest::run_permutations (glm_test, cfe_integrator, empirical_cfe_statistic, default_enhanced, fwe_strong, + null_distribution, null_contributions, uncorrected_pvalues); + + ProgressBar progress ("Outputting final results", (fwe_strong ? 1 : num_hypotheses) + 1 + 3*num_hypotheses); - if (permutations.size()) { - Stats::PermTest::run_permutations (permutations, glm_ttest, cfe_integrator, empirical_cfe_statistic, - cfe_output, cfe_output_neg, - perm_distribution, perm_distribution_neg, - uncorrected_pvalues, uncorrected_pvalues_neg); + if (fwe_strong) { + save_vector (null_distribution.col(0), Path::join (output_fixel_directory, "null_dist.txt")); + ++progress; } else { - Stats::PermTest::run_permutations (num_perms, glm_ttest, cfe_integrator, empirical_cfe_statistic, - cfe_output, cfe_output_neg, - perm_distribution, perm_distribution_neg, - uncorrected_pvalues, uncorrected_pvalues_neg); + for (size_t i = 0; i != num_hypotheses; ++i) { + save_vector (null_distribution.col(i), Path::join (output_fixel_directory, "null_dist" + postfix(i) + ".txt")); + ++progress; + } } - ProgressBar progress ("outputting final results"); - save_matrix (perm_distribution, Path::join (output_fixel_directory, "perm_dist.txt")); ++progress; - - vector_type pvalue_output (mask_fixels); - Math::Stats::Permutation::statistic2pvalue (perm_distribution, cfe_output, pvalue_output); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "fwe_pvalue.mif"), pvalue_output, fixel2row, output_header); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "uncorrected_pvalue.mif"), uncorrected_pvalues, fixel2row, output_header); ++progress; - - if (compute_negative_contrast) { - save_matrix (*perm_distribution_neg, Path::join (output_fixel_directory, "perm_dist_neg.txt")); ++progress; - vector_type pvalue_output_neg (mask_fixels); - Math::Stats::Permutation::statistic2pvalue (*perm_distribution_neg, *cfe_output_neg, pvalue_output_neg); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "fwe_pvalue_neg.mif"), pvalue_output_neg, fixel2row, output_header); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "uncorrected_pvalue_neg.mif"), *uncorrected_pvalues_neg, fixel2row, output_header); + const matrix_type pvalue_output = MR::Math::Stats::fwe_pvalue (null_distribution, default_enhanced); + 
++progress; + for (size_t i = 0; i != num_hypotheses; ++i) { + write_fixel_output (Path::join (output_fixel_directory, "fwe_1mpvalue" + postfix(i) + ".mif"), pvalue_output.col(i), mask, output_header); + ++progress; + write_fixel_output (Path::join (output_fixel_directory, "uncorrected_pvalue" + postfix(i) + ".mif"), uncorrected_pvalues.col(i), mask, output_header); + ++progress; + write_fixel_output (Path::join (output_fixel_directory, "null_contributions" + postfix(i) + ".mif"), null_contributions.col(i), mask, output_header); + ++progress; } } } diff --git a/cmd/fixelconnectivity.cpp b/cmd/fixelconnectivity.cpp new file mode 100644 index 0000000000..19ea2e5d38 --- /dev/null +++ b/cmd/fixelconnectivity.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2008-2018 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * + * MRtrix3 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/ + */ + + +#include "command.h" +#include "fixel/helpers.h" +#include "fixel/index_remapper.h" +#include "fixel/matrix.h" + + +#define DEFAULT_ANGLE_THRESHOLD 45.0 +#define DEFAULT_CONNECTIVITY_THRESHOLD 0.01 + + +using namespace MR; +using namespace App; + +void usage () +{ + AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)"; + + SYNOPSIS = "Generate a fixel-fixel connectivity matrix"; + + DESCRIPTION + + "This command will generate a directory containing three images, which encodes the " + "fixel-fixel connectivity matrix. 
Documentation regarding this format and how to " + "use it will come in the future."; + + ARGUMENTS + + Argument ("fixel_directory", "the directory containing the fixels between which connectivity will be quantified").type_directory_in() + + + Argument ("tracks", "the tracks used to determine fixel-fixel connectivity").type_tracks_in() + + + Argument ("matrix", "the output fixel-fixel connectivity matrix directory path").type_directory_out(); + + + OPTIONS + + OptionGroup ("Options that influence generation of the connectivity matrix / matrices") + + + Option ("threshold", "a threshold to define the required fraction of shared connections to be included in the neighbourhood (default: " + str(DEFAULT_CONNECTIVITY_THRESHOLD, 2) + ")") + + Argument ("value").type_float (0.0, 1.0) + + + Option ("angle", "the max angle threshold for assigning streamline tangents to fixels (Default: " + str(DEFAULT_ANGLE_THRESHOLD, 2) + " degrees)") + + Argument ("value").type_float (0.0, 90.0) + + + Option ("mask", "provide a fixel data file containing a mask of those fixels to be computed; fixels outside the mask will be empty in the output matrix") + + Argument ("file").type_image_in(); + +} + + +using value_type = float; +using Fixel::index_type; + + +void run() +{ + const value_type connectivity_threshold = get_option_value ("threshold", value_type(DEFAULT_CONNECTIVITY_THRESHOLD)); + const value_type angular_threshold = get_option_value ("angle", value_type(DEFAULT_ANGLE_THRESHOLD)); + + const std::string input_fixel_directory = argument[0]; + Header index_header = Fixel::find_index_header (input_fixel_directory); + auto index_image = index_header.get_image(); + const index_type num_fixels = Fixel::get_number_of_fixels (index_image); + + // When provided with a mask, this only influences which fixels get their connectivity quantified; + // these will appear empty in the output matrix + auto opt = get_options ("mask"); + Image fixel_mask; + if (opt.size()) { + fixel_mask = Image::open (opt[0][0]); + Fixel::check_data_file (fixel_mask); + if (!Fixel::fixels_match (index_header, fixel_mask)) + throw Exception ("Mask image provided using -mask option does not match input fixel directory"); + } else { + Header mask_header = Fixel::data_header_from_index (index_header); + mask_header.datatype() = DataType::Bit; + fixel_mask = Image::scratch (mask_header, "true-filled scratch fixel mask"); + for (fixel_mask.index(0) = 0; fixel_mask.index(0) != num_fixels; ++fixel_mask.index(0)) + fixel_mask.value() = true; + } + + auto connectivity_matrix = Fixel::Matrix::generate (argument[1], + index_image, + fixel_mask, + angular_threshold); + + Fixel::Matrix::normalise_and_write (connectivity_matrix, + connectivity_threshold, + argument[2]); + +} + diff --git a/cmd/fixelconvert.cpp b/cmd/fixelconvert.cpp index 3621e1252d..8cd2d59a1b 100644 --- a/cmd/fixelconvert.cpp +++ b/cmd/fixelconvert.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "progressbar.h" @@ -24,6 +25,7 @@ #include "fixel/helpers.h" #include "fixel/keys.h" #include "fixel/loop.h" +#include "fixel/types.h" #include "fixel/legacy/fixel_metric.h" #include "fixel/legacy/keys.h" @@ -33,6 +35,7 @@ using namespace MR; using namespace App; +using Fixel::index_type; using Fixel::Legacy::FixelMetric; @@ -43,6 +46,32 @@ void usage () SYNOPSIS = "Convert between the old format fixel image (.msf / .msh) and the new fixel directory format"; + EXAMPLES + + Example ("Convert from the old file format to the new directory format", + "fixelconvert old_fixels.msf new_fixels/ -out_size", + "This performs a simple conversion from old to new format, and " + "additionally writes the contents of the \"size\" field within " + "old-format fixel images stored using the \"FixelMetric\" class " + "(likely all of them) as an additional fixel data file.") + + + Example ("Convert multiple files from old to new format, preserving fixel correspondence", + "for_each *.msf : fixelconvert IN NAME_new/ -template template_fixels/", + "In this example, the for_each script is used to execute the fixelconvert " + "command once for each of a series of input files in the old fixel format, " + "generating a new output fixel directory for each. " + "Importantly here though, the -template option is used to ensure that the " + "ordering of fixels within these output directories is identical, such that " + "fixel data files can be exchanged between them (e.g. 
accumulating fixel " + "data files across subjects into a single template fixel directory).") + + + Example ("Convert from the new directory format to the old file format", + "fixelconvert new_fixels/ old_fixels.msf -value parameter.mif -in_size new_fixels/afd.mif", + "Conversion from the new directory format will contain the value 1.0 " + "for all output fixels in both the \"size\" and \"value\" fields of the " + "\"FixelMetric\" class, unless the -in_size and/or -value options are " + "used respectively to indicate which fixel data files should be used as " + "the source(s) of this information."); + ARGUMENTS + Argument ("fixel_in", "the input fixel file / directory.").type_various() + Argument ("fixel_out", "the output fixel file / directory.").type_various(); @@ -87,7 +116,7 @@ void convert_old2new () const std::string output_fixel_directory = argument[1]; Fixel::check_fixel_directory (output_fixel_directory, true); - uint32_t fixel_count = 0; + index_type fixel_count = 0; for (auto i = Loop (input) (input); i; ++i) fixel_count += input.value().size(); @@ -105,29 +134,29 @@ void convert_old2new () header.keyval()[Fixel::n_fixels_key] = str(fixel_count); header.ndim() = 4; header.size(3) = 2; - header.datatype() = DataType::from(); + header.datatype() = DataType::from(); header.datatype().set_byte_order_native(); - auto index_image = Image::create (Path::join (output_fixel_directory, "index" + file_extension), header); + auto index_image = Image::create (Path::join (output_fixel_directory, "index" + file_extension), header); auto directions_image = Image::create (Path::join (output_fixel_directory, "directions" + file_extension), directions_header).with_direct_io(); auto value_image = Image::create (Path::join (output_fixel_directory, value_name + file_extension), data_header); Image size_image; if (output_size) size_image = Image::create (Path::join (output_fixel_directory, "size" + file_extension), data_header); - Image template_index_image; + Image template_index_image; Image template_directions_image; opt = get_options ("template"); if (opt.size()) { Fixel::check_fixel_directory (opt[0][0]); - template_index_image = Fixel::find_index_header (opt[0][0]).get_image(); + template_index_image = Fixel::find_index_header (opt[0][0]).get_image(); check_dimensions (index_image, template_index_image); template_directions_image = Fixel::find_directions_header (opt[0][0]).get_image(); } - uint32_t offset = 0; + index_type offset = 0; for (auto i = Loop ("converting fixel format", input, 0, 3) (input, index_image); i; ++i) { - const uint32_t num_fixels = input.value().size(); + const index_type num_fixels = input.value().size(); if (template_index_image.valid()) { assign_pos_of (index_image).to (template_index_image); template_index_image.index(3) = 0; @@ -201,7 +230,7 @@ void convert_new2old () H_out.keyval()[Fixel::Legacy::size_key] = str(sizeof(FixelMetric)); Fixel::Legacy::Image out_image (argument[1], H_out); - auto index_image = H_index.get_image(); + auto index_image = H_index.get_image(); auto dirs_image = H_dirs.get_image(); auto value_image = H_data[value_index].get_image(); Image size_image; @@ -210,7 +239,7 @@ void convert_new2old () for (auto l = Loop (out_image) (out_image, index_image); l; ++l) { index_image.index(3) = 0; - const uint32_t num_fixels = index_image.value(); + const index_type num_fixels = index_image.value(); out_image.value().set_size (num_fixels); for (auto f = Fixel::Loop (index_image) (dirs_image, value_image); f; ++f) { // Construct the direction diff --git
a/cmd/fixelcorrespondence.cpp b/cmd/fixelcorrespondence.cpp index 5e03b46b19..e245bf9c4f 100644 --- a/cmd/fixelcorrespondence.cpp +++ b/cmd/fixelcorrespondence.cpp @@ -1,28 +1,32 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "algo/loop.h" #include "image.h" #include "fixel/helpers.h" #include "fixel/keys.h" +#include "fixel/types.h" using namespace MR; using namespace App; +using Fixel::index_type; + #define DEFAULT_ANGLE_THRESHOLD 45.0 void usage () @@ -56,7 +60,7 @@ void run () if (Path::is_dir (input_file)) throw Exception ("please input the specific fixel data file to be converted (not the fixel directory)"); - auto subject_index = Fixel::find_index_header (Fixel::get_fixel_directory (input_file)).get_image(); + auto subject_index = Fixel::find_index_header (Fixel::get_fixel_directory (input_file)).get_image(); auto subject_directions = Fixel::find_directions_header (Fixel::get_fixel_directory (input_file)).get_image().with_direct_io(); if (input_file == subject_directions.name()) @@ -65,7 +69,7 @@ void run () auto subject_data = Image::open (input_file); Fixel::check_fixel_size (subject_index, subject_data); - auto template_index = Fixel::find_index_header (argument[1]).get_image(); + auto template_index = Fixel::find_index_header (argument[1]).get_image(); auto template_directions = Fixel::find_directions_header (argument[1]).get_image().with_direct_io(); check_dimensions (subject_index, template_index); @@ -78,22 +82,22 @@ void run () for (auto i = Loop ("mapping subject fixel data to template fixels", template_index, 0, 3)(template_index, subject_index); i; ++i) { template_index.index(3) = 0; - uint32_t nfixels_template = template_index.value(); + index_type nfixels_template = template_index.value(); template_index.index(3) = 1; - uint32_t template_offset = template_index.value(); + index_type template_offset = template_index.value(); - for (size_t t = 0; t < nfixels_template; ++t) { + for (index_type t = 0; t < nfixels_template; ++t) { float largest_dp = 0.0; int index_of_closest_fixel = -1; subject_index.index(3) = 0; - uint32_t nfixels_subject = subject_index.value(); + index_type nfixels_subject = subject_index.value(); subject_index.index(3) = 1; - uint32_t subject_offset = subject_index.value(); + index_type subject_offset = subject_index.value(); template_directions.index(0) = template_offset + t; - for (size_t s = 0; s < nfixels_subject; ++s) { + for (index_type s = 0; s < nfixels_subject; ++s) { subject_directions.index(0) = subject_offset + 
s; Eigen::Vector3f templatedir = template_directions.row(1); diff --git a/cmd/fixelcrop.cpp b/cmd/fixelcrop.cpp index d129e42bfb..8a364eace3 100644 --- a/cmd/fixelcrop.cpp +++ b/cmd/fixelcrop.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "algo/loop.h" @@ -21,10 +22,14 @@ #include "fixel/helpers.h" #include "fixel/keys.h" +#include "fixel/types.h" +#include "fixel/types.h" using namespace MR; using namespace App; +using Fixel::index_type; + void usage () { @@ -50,7 +55,7 @@ void run () const auto in_directory = argument[0]; Fixel::check_fixel_directory (in_directory); Header in_index_header = Fixel::find_index_header (in_directory); - auto in_index_image = in_index_header.get_image (); + auto in_index_image = in_index_header.get_image (); auto mask_image = Image::open (argument[1]); Fixel::check_fixel_size (in_index_image, mask_image); @@ -59,7 +64,7 @@ void run () Fixel::check_fixel_directory (out_fixel_directory, true); Header out_header = Header (in_index_image); - size_t total_nfixels = Fixel::get_number_of_fixels (in_index_header); + index_type total_nfixels = Fixel::get_number_of_fixels (in_index_header); // We need to do a first pass of the mask image to determine the number of cropped fixels for (auto l = Loop (0) (mask_image); l; ++l) { @@ -68,7 +73,7 @@ void run () } out_header.keyval ()[Fixel::n_fixels_key] = str (total_nfixels); - auto out_index_image = Image::create (Path::join (out_fixel_directory, Path::basename (in_index_image.name())), out_header); + auto out_index_image = Image::create (Path::join (out_fixel_directory, Path::basename (in_index_image.name())), out_header); // Open all data images and create output date images with size equal to expected number of fixels @@ -86,16 +91,16 @@ void run () } mask_image.index (1) = 0; - uint32_t out_offset = 0; + index_type out_offset = 0; for (auto l = Loop ("cropping fixel image", 0, 3) (in_index_image, out_index_image); l; ++l) { in_index_image.index(3) = 0; - size_t in_nfixels = in_index_image.value(); + index_type in_nfixels = in_index_image.value(); in_index_image.index(3) = 1; - uint32_t in_offset = in_index_image.value(); + index_type in_offset = in_index_image.value(); - size_t out_nfixels = 0; - for (size_t i = 0; i < in_nfixels; ++i) { + index_type out_nfixels = 0; + for (index_type i = 0; i < in_nfixels; ++i) { mask_image.index(0) = in_offset + i; if (mask_image.value()) { for (size_t d = 0; d < in_data_images.size(); ++d) { diff --git a/cmd/fixelfilter.cpp 
b/cmd/fixelfilter.cpp new file mode 100644 index 0000000000..f8b7850aa1 --- /dev/null +++ b/cmd/fixelfilter.cpp @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2008-2018 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * + * MRtrix3 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/ + */ + + +#include "command.h" +#include "header.h" +#include "image.h" +#include "progressbar.h" +#include "file/path.h" +#include "file/utils.h" +#include "fixel/helpers.h" + +#include "fixel/filter/base.h" +#include "fixel/filter/connect.h" +#include "fixel/filter/smooth.h" +#include "fixel/matrix.h" +#include "stats/cfe.h" + + + +using namespace MR; +using namespace App; +using namespace MR::Fixel; + +const char* const filters[] = { "connect", "smooth", nullptr }; + +void usage () +{ + AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)"; + + SYNOPSIS = "Perform filtering operations on fixel-based data"; + + DESCRIPTION + + "If the first input to the command is a specific fixel data file, then a filtered version of only that file " + "will be generated by the command. Alternatively, if the input is the location of a fixel directory, then the " + "command will create a duplicate of the fixel directory, and apply the specified filter operation to all " + "fixel data files within the directory." + + + "For the smoothing filter, it is typically recommended that the provided fixel-fixel connectivity matrix should " + "include both streamline-based connection strength and spatial distance between fixels; this can be generated " + "by the MRtrix3 command fixelconnectivity, specifically using the -smoothing option."; + + ARGUMENTS + + Argument ("input", "the input: either a fixel data file, or a fixel directory (see Description)").type_various() + + Argument ("filter", "the filtering operation to perform; options are: " + join (filters, ", ")).type_choice (filters) + + Argument ("output", "the output: either a fixel data file, or a fixel directory (see Description)").type_various(); + + OPTIONS + + Option ("matrix", "provide a fixel-fixel connectivity matrix for filtering operations that require it").required() + + Argument ("file").type_directory_in() + + + OptionGroup ("Options specific to the \"connect\" filter") + + Option ("threshold_value", "specify a threshold for the input fixel data file values " + "(default = " + str(DEFAULT_FIXEL_CONNECT_VALUE_THRESHOLD) + ")") + + Argument ("value").type_float () + + Option ("threshold_connectivity", "specify a fixel-fixel connectivity threshold for connected-component analysis " + "(default = " + str(DEFAULT_FIXEL_CONNECT_CONNECTIVITY_THRESHOLD) + ")") + + Argument ("value").type_float (0.0) + + + OptionGroup ("Options specific to the \"smooth\" filter") + + Option ("fwhm", "the full-width half-maximum (FWHM) of the spatial component of the smoothing filter " + "(default = " + str(DEFAULT_FIXEL_SMOOTHING_FWHM) + "mm)") + + Argument ("value").type_float (0.0) + + Option ("minweight", "apply a minimum threshold to smoothing weights " + "(default = " + str(DEFAULT_FIXEL_SMOOTHING_MINWEIGHT) + ")") + + Argument ("value").type_float (0.0) + + Option ("mask", "only perform smoothing within a specified binary fixel mask") + 
+ Argument ("image").type_image_in(); + +} + + + +using value_type = float; + + + +void run() +{ + + std::set option_list { "threhsold_value", + "threshold_connectivity", + "fwhm", + "minweight", + "mask" }; + + Header index_header; + Image single_file; + vector
multiple_files; + Header output_header; + try { + index_header = Fixel::find_index_header (argument[0]); + multiple_files = Fixel::find_data_headers (argument[0], index_header); + if (multiple_files.empty()) + throw Exception ("No fixel data files found in directory \"" + argument[0] + "\""); + output_header = Header (multiple_files[0]); + } catch (...) { + try { + index_header = Fixel::find_index_header (Fixel::get_fixel_directory (argument[0])); + single_file = Image::open (argument[0]); + Fixel::check_data_file (single_file); + output_header = Header (single_file); + } catch (...) { + throw Exception ("Could not interpret first argument \"" + argument[0] + "\" as either a fixel data file, or a fixel directory"); + } + } + + if (single_file.valid() && !Fixel::fixels_match (index_header, single_file)) + throw Exception ("File \"" + argument[0] + "\" is not a valid fixel data file (does not match corresponding index image)"); + + auto opt = get_options ("matrix"); + Fixel::Matrix::Reader matrix (opt[0][0]); + + Image index_image = index_header.get_image(); + const size_t nfixels = Fixel::get_number_of_fixels (index_image); + if (nfixels != matrix.size()) + throw Exception ("Number of fixels in input (" + str(nfixels) + ") does not match number of fixels in connectivity matrix (" + str(matrix.size()) + ")"); + + std::unique_ptr filter; + switch (int(argument[1])) { + case 0: + { + const float value = get_option_value ("threshold_value", float(DEFAULT_FIXEL_CONNECT_VALUE_THRESHOLD)); + const float connect = get_option_value ("threshold_connectivity", float(DEFAULT_FIXEL_CONNECT_CONNECTIVITY_THRESHOLD)); + filter.reset (new Fixel::Filter::Connect (matrix, value, connect)); + output_header.datatype() = DataType::UInt32; + output_header.datatype().set_byte_order_native(); + option_list.erase ("threshold_value"); + option_list.erase ("threshold_connectivity"); + } + break; + case 1: + { + const float fwhm = get_option_value ("fwhm", float(DEFAULT_FIXEL_SMOOTHING_FWHM)); + const float threshold = get_option_value ("minweight", float(DEFAULT_FIXEL_SMOOTHING_MINWEIGHT)); + opt = get_options ("mask"); + if (opt.size()) { + Image mask_image = Image::open (opt[0][0]); + filter.reset (new Fixel::Filter::Smooth (index_image, matrix, mask_image, fwhm, threshold)); + } else { + filter.reset (new Fixel::Filter::Smooth (index_image, matrix, fwhm, threshold)); + } + option_list.erase ("fwhm"); + option_list.erase ("minweight"); + option_list.erase ("mask"); + } + break; + default: + assert (0); + } + + for (const auto& i : option_list) { + if (get_options (i).size()) + WARN ("Option -" + i + " ignored; not relevant to " + filters[int(argument[1])] + " filter"); + } + + if (single_file.valid()) { + auto output_image = Image::create (argument[2], single_file); + CONSOLE (std::string ("Applying \"") + filters[argument[1]] + "\" operation to fixel data file \"" + single_file.name() + "\""); + (*filter) (single_file, output_image); + } else { + Fixel::copy_index_and_directions_file (argument[0], argument[2]); + ProgressBar progress (std::string ("Applying \"") + filters[argument[1]] + "\" operation to " + str(multiple_files.size()) + " fixel data files", + multiple_files.size()); + for (auto& H : multiple_files) { + auto input_image = H.get_image(); + auto output_image = Image::create (Path::join (argument[2], Path::basename (H.name())), H); + (*filter) (input_image, output_image); + ++progress; + } + } + +} diff --git a/cmd/fixelreorient.cpp b/cmd/fixelreorient.cpp index 77969842e0..febd8ac12a 100644 --- 
a/cmd/fixelreorient.cpp +++ b/cmd/fixelreorient.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "algo/loop.h" @@ -20,11 +21,13 @@ #include "adapter/jacobian.h" #include "registration/warp/helpers.h" +#include "fixel/helpers.h" +#include "fixel/keys.h" + using namespace MR; using namespace App; -#include "fixel/helpers.h" -#include "fixel/keys.h" +using Fixel::index_type; void usage () { @@ -53,7 +56,7 @@ void run () std::string input_fixel_directory = argument[0]; Fixel::check_fixel_directory (input_fixel_directory); - auto input_index_image = Fixel::find_index_header (input_fixel_directory).get_image (); + auto input_index_image = Fixel::find_index_header (input_fixel_directory).get_image (); Header warp_header = Header::open (argument[1]); Registration::Warp::check_warp (warp_header); @@ -77,12 +80,12 @@ void run () for (auto i = Loop ("reorienting fixel directions", input_index_image, 0, 3)(input_index_image, jacobian); i; ++i) { input_index_image.index(3) = 0; - uint32_t num_fixels_in_voxel = input_index_image.value(); + index_type num_fixels_in_voxel = input_index_image.value(); if (num_fixels_in_voxel) { input_index_image.index(3) = 1; - uint32_t index = input_index_image.value(); + index_type index = input_index_image.value(); Eigen::Matrix transform = jacobian.value().inverse(); - for (size_t f = 0; f < num_fixels_in_voxel; ++f) { + for (index_type f = 0; f < num_fixels_in_voxel; ++f) { input_directions_image.index(0) = index + f; output_directions_image.index(0) = index + f; output_directions_image.row(1) = (transform * Eigen::Vector3f (input_directions_image.row(1))).normalized(); diff --git a/cmd/fod2dec.cpp b/cmd/fod2dec.cpp index a5286b56bf..4bd77790e2 100644 --- a/cmd/fod2dec.cpp +++ b/cmd/fod2dec.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "command.h" @@ -50,10 +51,10 @@ void usage () REFERENCES + "Dhollander T, Smith RE, Tournier JD, Jeurissen B, Connelly A. " // Internal "Time to move on: an FOD-based DEC map to replace DTI's trademark DEC FA. " - "Proc Intl Soc Mag Reson Med, 2015, 23, 1027." + "Proc Intl Soc Mag Reson Med, 2015, 23, 1027" + "Dhollander T, Raffelt D, Smith RE, Connelly A. " // Internal "Panchromatic sharpening of FOD-based DEC maps by structural T1 information. " - "Proc Intl Soc Mag Reson Med, 2015, 23, 566."; + "Proc Intl Soc Mag Reson Med, 2015, 23, 566"; ARGUMENTS + Argument ("input","The input FOD image (spherical harmonic coefficients).").type_image_in () @@ -280,7 +281,7 @@ void run () { } ThreadedLoop ("computing colours", fod_img, 0, 3) - .run (DecComputer (DecTransform (Math::SH::LforN(fod_img.size(3)), dirs, thresh), mask_img, w_img), fod_img, dec_img); + .run (DecComputer (DecTransform (Math::SH::LforN(fod_img.size(3)), dirs, thresh), mask_img, w_img), fod_img, dec_img); } auto out_hdr = map_hdr.valid() ? Header(map_hdr) : Header(dec_img); diff --git a/cmd/fod2fixel.cpp b/cmd/fod2fixel.cpp index c833761aa1..f560b45c1a 100644 --- a/cmd/fod2fixel.cpp +++ b/cmd/fod2fixel.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "command.h" #include "progressbar.h" @@ -20,6 +21,7 @@ #include "fixel/keys.h" #include "fixel/helpers.h" +#include "fixel/types.h" #include "math/SH.h" @@ -38,6 +40,7 @@ using namespace MR::DWI; using namespace MR::DWI::FMLS; using namespace App; +using Fixel::index_type; const OptionGroup OutputOptions = OptionGroup ("Metric values for fixel-based sparse output images") @@ -46,12 +49,12 @@ const OptionGroup OutputOptions = OptionGroup ("Metric values for fixel-based sp "output the total Apparent Fibre Density per fixel (integral of FOD lobe)") + Argument ("image").type_image_out() - + Option ("peak", - "output the peak FOD amplitude per fixel") + + Option ("peak_amp", + "output the amplitude of the FOD at the maximal peak per fixel") + Argument ("image").type_image_out() + Option ("disp", - "output a measure of dispersion per fixel as the ratio between FOD lobe integral and peak amplitude") + "output a measure of dispersion per fixel as the ratio between FOD lobe integral and maximal peak amplitude") + Argument ("image").type_image_out(); @@ -73,7 +76,7 @@ void usage () + "* Reference for Apparent Fibre Density (AFD):\n" "Raffelt, D.; Tournier, J.-D.; Rose, S.; Ridgway, G.R.; Henderson, R.; Crozier, S.; Salvado, O.; Connelly, A. " // Internal "Apparent Fibre Density: a novel measure for the analysis of diffusion-weighted magnetic resonance images." - "Neuroimage, 2012, 15;59(4), 3976-94."; + "Neuroimage, 2012, 15;59(4), 3976-94"; ARGUMENTS + Argument ("fod", "the input fod image.").type_image_in () @@ -88,8 +91,7 @@ void usage () + OptionGroup ("Other options for fod2fixel") - + Option ("mask", - "only perform computation within the specified binary brain mask image.") + + Option ("mask", "only perform computation within the specified binary brain mask image.") + Argument ("image").type_image_in() + Option ("maxnum", "maximum number of fixels to output for any particular voxel (default: no limit)") @@ -97,7 +99,7 @@ void usage () + Option ("nii", "output the directions and index file in nii format (instead of the default mif)") - + Option ("dirpeak", "define the fixel direction as the peak lobe direction as opposed to the lobe mean"); + + Option ("dirpeak", "define the fixel direction as that of the lobe's maximal peak as opposed to its weighted mean direction (the default)"); } @@ -106,8 +108,8 @@ void usage () class Segmented_FOD_receiver { MEMALIGN(Segmented_FOD_receiver) public: - Segmented_FOD_receiver (const Header& header, const uint32_t maxnum = 0, bool dir_as_peak = false) : - H (header), fixel_count (0), max_per_voxel (maxnum), dir_as_peak (dir_as_peak) { } + Segmented_FOD_receiver (const Header& header, const index_type maxnum = 0, bool dir_from_peak = false) : + H (header), fixel_count (0), max_per_voxel (maxnum), dir_from_peak (dir_from_peak) { } void commit (); @@ -115,7 +117,7 @@ class Segmented_FOD_receiver { MEMALIGN(Segmented_FOD_receiver) void set_index_output (const std::string& path) { index_path = path; } void set_directions_output (const std::string& path) { dir_path = path; } void set_afd_output (const std::string& path) { afd_path = path; } - void set_peak_output (const std::string& path) { peak_path = path; } + void set_peak_amp_output (const std::string& path) { peak_amp_path = path; } void set_disp_output (const std::string& path) { disp_path = path; } bool operator() (const FOD_lobes&); @@ -126,21 +128,21 @@ class Segmented_FOD_receiver { MEMALIGN(Segmented_FOD_receiver) struct Primitive_FOD_lobe { MEMALIGN (Primitive_FOD_lobe) Eigen::Vector3f 
dir; float integral; - float peak_value; - Primitive_FOD_lobe (Eigen::Vector3f dir, float integral, float peak_value) : - dir (dir), integral (integral), peak_value (peak_value) {} + float max_peak_amp; + Primitive_FOD_lobe (Eigen::Vector3f dir, float integral, float max_peak_amp) : + dir (dir), integral (integral), max_peak_amp (max_peak_amp) {} }; class Primitive_FOD_lobes : public vector { MEMALIGN (Primitive_FOD_lobes) public: - Primitive_FOD_lobes (const FOD_lobes& in, const uint32_t maxcount, bool use_peak_dir) : + Primitive_FOD_lobes (const FOD_lobes& in, const index_type maxcount, bool dir_from_peak) : vox (in.vox) { - const uint32_t N = maxcount ? std::min (uint32_t(in.size()), maxcount) : in.size(); - for (uint32_t i = 0; i != N; ++i) { + const index_type N = maxcount ? std::min (index_type(in.size()), maxcount) : in.size(); + for (index_type i = 0; i != N; ++i) { const FOD_lobe& lobe (in[i]); - if (use_peak_dir) + if (dir_from_peak) this->emplace_back (lobe.get_peak_dir(0).cast(), lobe.get_integral(), lobe.get_max_peak_value()); else this->emplace_back (lobe.get_mean_dir().cast(), lobe.get_integral(), lobe.get_max_peak_value()); @@ -150,11 +152,11 @@ class Segmented_FOD_receiver { MEMALIGN(Segmented_FOD_receiver) }; Header H; - std::string fixel_directory_path, index_path, dir_path, afd_path, peak_path, disp_path; + std::string fixel_directory_path, index_path, dir_path, afd_path, peak_amp_path, disp_path; vector lobes; - uint32_t fixel_count; - uint32_t max_per_voxel; - bool dir_as_peak; + index_type fixel_count; + index_type max_per_voxel; + bool dir_from_peak; }; @@ -163,7 +165,7 @@ class Segmented_FOD_receiver { MEMALIGN(Segmented_FOD_receiver) bool Segmented_FOD_receiver::operator() (const FOD_lobes& in) { if (in.size()) { - lobes.emplace_back (in, max_per_voxel, dir_as_peak); + lobes.emplace_back (in, max_per_voxel, dir_from_peak); fixel_count += lobes.back().size(); } return true; @@ -177,21 +179,21 @@ void Segmented_FOD_receiver::commit () return; using DataImage = Image; - using IndexImage = Image; + using IndexImage = Image; const auto index_filepath = Path::join (fixel_directory_path, index_path); std::unique_ptr index_image; std::unique_ptr dir_image; std::unique_ptr afd_image; - std::unique_ptr peak_image; + std::unique_ptr peak_amp_image; std::unique_ptr disp_image; auto index_header (H); index_header.keyval()[Fixel::n_fixels_key] = str(fixel_count); index_header.ndim() = 4; index_header.size(3) = 2; - index_header.datatype() = DataType::from(); + index_header.datatype() = DataType::from(); index_header.datatype().set_byte_order_native(); index_image = make_unique (IndexImage::create (index_filepath, index_header)); @@ -218,12 +220,12 @@ void Segmented_FOD_receiver::commit () Fixel::check_fixel_size (*index_image, *afd_image); } - if (peak_path.size()) { - auto peak_header(fixel_data_header); - peak_header.size(1) = 1; - peak_image = make_unique (DataImage::create (Path::join(fixel_directory_path, peak_path), peak_header)); - peak_image->index(1) = 0; - Fixel::check_fixel_size (*index_image, *peak_image); + if (peak_amp_path.size()) { + auto peak_amp_header (fixel_data_header); + peak_amp_header.size(1) = 1; + peak_amp_image = make_unique (DataImage::create (Path::join(fixel_directory_path, peak_amp_path), peak_amp_header)); + peak_amp_image->index(1) = 0; + Fixel::check_fixel_size (*index_image, *peak_amp_image); } if (disp_path.size()) { @@ -262,17 +264,17 @@ void Segmented_FOD_receiver::commit () } } - if (peak_image) { + if (peak_amp_image) { for (size_t i = 0; 
i < n_vox_fixels; ++i) { - peak_image->index(0) = offset + i; - peak_image->value() = vox_fixels[i].peak_value; + peak_amp_image->index(0) = offset + i; + peak_amp_image->value() = vox_fixels[i].max_peak_amp; } } if (disp_image) { for (size_t i = 0; i < n_vox_fixels; ++i) { disp_image->index(0) = offset + i; - disp_image->value() = vox_fixels[i].integral / vox_fixels[i].peak_value; + disp_image->value() = vox_fixels[i].integral / vox_fixels[i].max_peak_amp; } } @@ -293,7 +295,7 @@ void run () auto fod_data = H.get_image(); const bool dir_as_peak = get_options ("dirpeak").size(); - const uint32_t maxnum = get_option_value ("maxnum", 0); + const index_type maxnum = get_option_value ("maxnum", 0); Segmented_FOD_receiver receiver (H, maxnum, dir_as_peak); @@ -310,9 +312,9 @@ void run () receiver.set_directions_output (default_directions_filename); auto - opt = get_options ("afd"); if (opt.size()) receiver.set_afd_output (opt[0][0]); - opt = get_options ("peak"); if (opt.size()) receiver.set_peak_output (opt[0][0]); - opt = get_options ("disp"); if (opt.size()) receiver.set_disp_output (opt[0][0]); + opt = get_options ("afd"); if (opt.size()) receiver.set_afd_output (opt[0][0]); + opt = get_options ("peak_amp"); if (opt.size()) receiver.set_peak_amp_output (opt[0][0]); + opt = get_options ("disp"); if (opt.size()) receiver.set_disp_output (opt[0][0]); opt = get_options ("mask"); Image mask; @@ -333,4 +335,3 @@ void run () Thread::run_queue (writer, Thread::batch (SH_coefs()), Thread::multi (fmls), Thread::batch (FOD_lobes()), receiver); receiver.commit (); } - diff --git a/cmd/label2colour.cpp b/cmd/label2colour.cpp index 74be0e40af..46410959fa 100644 --- a/cmd/label2colour.cpp +++ b/cmd/label2colour.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "command.h" diff --git a/cmd/label2mesh.cpp b/cmd/label2mesh.cpp index 67ed65bc38..4d11d58624 100644 --- a/cmd/label2mesh.cpp +++ b/cmd/label2mesh.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "command.h" diff --git a/cmd/labelconvert.cpp b/cmd/labelconvert.cpp index ea2b0e8d67..2732154f23 100644 --- a/cmd/labelconvert.cpp +++ b/cmd/labelconvert.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "image_helpers.h" @@ -53,12 +54,18 @@ void usage () + "Typical usage is to convert a parcellation image provided by some other software, based on " "the lookup table provided by that software, to conform to a new lookup table, particularly " "one where the node indices increment from 1, in preparation for connectome construction; " - "examples of such target lookup table files are provided in share//mrtrix3//labelconvert//"; + "examples of such target lookup table files are provided in share//mrtrix3//labelconvert//, " + "but can be created by the user to provide the desired node set // ordering // colours."; + EXAMPLES + + Example ("Convert a Desikan-Killiany parcellation image as provided by FreeSurfer to have nodes incrementing from 1", + "labelconvert aparc+aseg.mgz FreeSurferColorLUT.txt mrtrix3//share//mrtrix3//labelconvert//fs_default.txt nodes.mif", + "Paths to the files in the example above would need to be revised according to their " + "locations on the user's system."); ARGUMENTS + Argument ("path_in", "the input image").type_image_in() - + Argument ("lut_in", "the connectome lookup table for the input image").type_file_in() + + Argument ("lut_in", "the connectome lookup table corresponding to the input image").type_file_in() + Argument ("lut_out", "the target connectome lookup table for the output image").type_file_in() + Argument ("image_out", "the output image").type_image_out(); diff --git a/cmd/labelstats.cpp b/cmd/labelstats.cpp new file mode 100644 index 0000000000..ae862a20e8 --- /dev/null +++ b/cmd/labelstats.cpp @@ -0,0 +1,121 @@ +/* Copyright (c) 2008-2018 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. + */ + + +#include "command.h" +#include "header.h" +#include "image.h" +#include "image_helpers.h" +#include "transform.h" +#include "types.h" + +#include "connectome/connectome.h" + +using namespace MR; +using namespace App; + + +const char * field_choices[] = { "mass", "centre", nullptr }; + + +void usage () +{ + AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)"; + + SYNOPSIS = "Compute statistics of parcels within a label image"; + + ARGUMENTS + + Argument ("input", "the input label image").type_image_in(); + + OPTIONS + + Option ("output", "output only the field specified; " + "options are: " + join(field_choices, ",")) + + Argument ("choice").type_choice (field_choices) + + + Option ("voxelspace", "report parcel centres of mass in voxel space rather than scanner space"); + +} + + +using Connectome::node_t; +using vector_type = Eigen::Array; +using matrix_type = Eigen::Matrix; + + + +void run () +{ + Header H = Header::open (argument[0]); + if (H.ndim() > 3) + throw Exception ("Command does not accept images with more than 3 dimensions"); + Connectome::check (H); + Image image = H.get_image(); + + matrix_type coms; + vector_type masses; + + for (auto l = Loop(image) (image); l; ++l) { + const node_t value = image.value(); + if (value) { + if (value > coms.rows()) { + coms.conservativeResizeLike (matrix_type::Zero (value, 3)); + masses.conservativeResizeLike (vector_type::Zero (value)); + } + coms.row(value-1) += Eigen::Vector3 (image.index(0), image.index(1), image.index(2)); + masses[value-1]++; + } + } + + coms = coms.array().colwise() / masses; + + if (!get_options("voxelspace").size()) + coms = (image.transform() * coms.transpose()).transpose(); + + auto opt = get_options ("output"); + if (opt.size()) { + switch (int(opt[0][0])) { + case 0: std::cout << masses; break; + case 1: std::cout << coms; break; + default: assert (0); + } + return; + } + + // Ensure that the printout of centres-of-mass is nicely formatted + Eigen::IOFormat fmt (Eigen::StreamPrecision, 0, ", ", "\n", "[ ", " ]", "", ""); + std::stringstream com_stringstream; + com_stringstream << coms.format (fmt); + const vector com_strings = split(com_stringstream.str(), "\n"); + assert (com_strings.size() == size_t(masses.size())); + + // Find width of first non-empty string, in order to centralise header label + size_t com_width = 0; + for (const auto& i : com_strings) { + if (i.size()) { + com_width = i.size(); + break; + } + } + + std::cout << std::setw(8) << std::right << "index" << " " + << std::setw(8) << std::right << "mass" << " " + << std::setw(4) << std::right << " " + << std::setw(com_width/2 + std::string("centre of mass").size()/2) << std::right << "centre of mass" << "\n"; + for (ssize_t i = 0; i != masses.size(); ++i) { + if (masses[i]) { + std::cout << std::setw(8) << std::right << i+1 << " " + << std::setw(8) << std::right << masses[i] << " " + << std::setw(4+com_width) << std::right << com_strings[i] << "\n"; + } + } +} diff --git a/cmd/maskdump.cpp b/cmd/maskdump.cpp index 9adaf6a2ec..a04dd5efb2 100644 --- a/cmd/maskdump.cpp +++ b/cmd/maskdump.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. 
+/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "algo/loop.h" diff --git a/cmd/maskfilter.cpp b/cmd/maskfilter.cpp index 060c5f2aa9..434b83b4fe 100644 --- a/cmd/maskfilter.cpp +++ b/cmd/maskfilter.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "command.h" #include "image.h" #include "filter/base.h" @@ -105,7 +106,7 @@ void run () { auto input_image = Image::open (argument[0]); int filter_index = argument[1]; - + if (filter_index == 0) { // Mask clean Filter::MaskClean filter (input_image, std::string("applying mask cleaning filter to image ") + Path::basename (argument[0])); filter.set_scale(get_option_value ("scale", DEFAULT_CLEAN_SCALE)); @@ -121,16 +122,9 @@ void run () { if (filter_index == 1) { // Connected components Filter::ConnectedComponents filter (input_image, std::string("applying connected-component filter to image ") + Path::basename (argument[0])); auto opt = get_options ("axes"); - vector axes; if (opt.size()) { - axes = opt[0][0]; - for (size_t d = 0; d < input_image.ndim(); d++) - filter.set_ignore_dim (d, true); - for (size_t i = 0; i < axes.size(); i++) { - if (axes[i] >= static_cast (input_image.ndim()) || axes[i] < 0) - throw Exception ("axis supplied to option -ignore is out of bounds"); - filter.set_ignore_dim (axes[i], false); - } + const vector axes = opt[0][0]; + filter.set_axes (axes); } bool largest_only = false; opt = get_options ("largest"); diff --git a/cmd/mesh2voxel.cpp b/cmd/mesh2voxel.cpp index 273057c7ac..891e1506e3 100644 --- a/cmd/mesh2voxel.cpp +++ b/cmd/mesh2voxel.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "header.h" @@ -38,7 +39,7 @@ void usage () SYNOPSIS = "Convert a mesh surface to a partial volume estimation image"; - REFERENCES + REFERENCES + "Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. " // Internal "Anatomically-constrained tractography: Improved diffusion MRI streamlines tractography through effective use of anatomical information. " "NeuroImage, 2012, 62, 1924-1938"; @@ -62,9 +63,12 @@ void run () Header template_header = Header::open (argument[1]); check_3D_nonunity (template_header); - // Create the output image + // Ensure that a floating-point representation is used for the output image, + // as is required for representing partial volumes template_header.datatype() = DataType::Float32; template_header.datatype().set_byte_order_native(); + + // Create the output image Image output = Image::create (argument[2], template_header); // Perform the partial volume estimation diff --git a/cmd/meshconvert.cpp b/cmd/meshconvert.cpp index 76b3a30c50..95b2e52885 100644 --- a/cmd/meshconvert.cpp +++ b/cmd/meshconvert.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. 
+/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "header.h" #include "surface/mesh.h" @@ -27,7 +28,7 @@ using namespace MR::Surface; -const char* transform_choices[] = { "first2real", "real2first", "voxel2real", "real2voxel", nullptr }; +const char* transform_choices[] = { "first2real", "real2first", "voxel2real", "real2voxel", "fs2real", nullptr }; @@ -79,6 +80,7 @@ void run () case 1: transform->set_real2first(); break; case 2: transform->set_voxel2real(); break; case 3: transform->set_real2voxel(); break; + case 4: transform->set_fs2real (); break; default: throw Exception ("Unexpected mode for spatial transformation of vertices"); } MeshMulti temp; diff --git a/cmd/meshfilter.cpp b/cmd/meshfilter.cpp index d0f6f18a65..3118b5ebb5 100644 --- a/cmd/meshfilter.cpp +++ b/cmd/meshfilter.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "thread_queue.h" @@ -48,6 +49,18 @@ void usage () SYNOPSIS = "Apply filter operations to meshes"; + DESCRIPTION + + "While this command has only one filter operation currently available, it " + "nevertheless presents with a comparable interface to the MRtrix3 commands " + "maskfilter and mrfilter commands."; + + EXAMPLES + + Example ("Apply a mesh smoothing filter (currently the only filter available", + "meshfilter input.vtk smooth output.vtk", + "The usage of this command may cause confusion due to the generic interface " + "despite only one filtering operation being currently available. 
This simple " + "example usage is therefore provided for clarity."); + ARGUMENTS + Argument ("input", "the input mesh file").type_file_in() + Argument ("filter", "the filter to apply." diff --git a/cmd/mraverageheader.cpp b/cmd/mraverageheader.cpp index aadae11321..d6f9fe1e9e 100644 --- a/cmd/mraverageheader.cpp +++ b/cmd/mraverageheader.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "debug.h" #include "image.h" @@ -85,7 +86,7 @@ void run () } vector> transform_header_with; - auto H = compute_minimum_average_header (headers_in, resolution, padding, transform_header_with); + auto H = compute_minimum_average_header (headers_in, transform_header_with, resolution, padding); H.datatype() = DataType::Bit; if (fill) { H.ndim() = dim; diff --git a/cmd/mrcalc.cpp b/cmd/mrcalc.cpp index 0b2e409e82..0968c26947 100644 --- a/cmd/mrcalc.cpp +++ b/cmd/mrcalc.cpp @@ -1,18 +1,184 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ + + +/********************************************************************** + CONVENIENCE MACROS: + **********************************************************************/ + +#ifdef SECTION + +#undef SECTION_TITLE +#undef UNARY_OP +#undef BINARY_OP +#undef TERNARY_OP + +# if SECTION == 1 // usage section + +#define SECTION_TITLE(TITLE) \ + + OptionGroup (TITLE) + +#define UNARY_OP(OPTION,FEEDBACK,FLAGS,DESCRIPTION,REAL_OPERATION,COMPLEX_OPERATION) \ + + Option (#OPTION, FEEDBACK " : " DESCRIPTION).allow_multiple() + +#define BINARY_OP(OPTION,FEEDBACK,FLAGS,DESCRIPTION,REAL_OPERATION,COMPLEX_OPERATION) \ + + Option (#OPTION, FEEDBACK " : " DESCRIPTION).allow_multiple() + +#define TERNARY_OP(OPTION,FEEDBACK,FLAGS,DESCRIPTION,REAL_OPERATION,COMPLEX_OPERATION) \ + + Option (#OPTION, FEEDBACK " : " DESCRIPTION).allow_multiple() + +# elif SECTION == 2 // code section + +#define SECTION_TITLE(TITLE) + +#define UNARY_OP(OPTION,FEEDBACK,FLAGS,DESCRIPTION,REAL_OPERATION,COMPLEX_OPERATION) \ + class Op_##OPTION : public OpUnary { NOMEMALIGN \ + public: \ + Op_##OPTION () : OpUnary (FEEDBACK, FLAGS & COMPLEX_MAPS_TO_REAL, FLAGS & REAL_MAPS_TO_COMPLEX) { } \ + complex_type R (real_type v) const REAL_OPERATION \ + complex_type Z (complex_type v) const COMPLEX_OPERATION \ + }; + +#define BINARY_OP(OPTION,FEEDBACK,FLAGS,DESCRIPTION,REAL_OPERATION,COMPLEX_OPERATION) \ + class Op_##OPTION : public OpBinary { NOMEMALIGN \ + public: \ + Op_##OPTION () : OpBinary (FEEDBACK, FLAGS & COMPLEX_MAPS_TO_REAL, FLAGS & REAL_MAPS_TO_COMPLEX) { } \ + complex_type R (real_type a, real_type b) const REAL_OPERATION \ + complex_type Z (complex_type a, complex_type b) const COMPLEX_OPERATION \ + }; + +#define TERNARY_OP(OPTION,FEEDBACK,FLAGS,DESCRIPTION,REAL_OPERATION,COMPLEX_OPERATION) \ + class Op_##OPTION : public OpTernary { NOMEMALIGN \ + public: \ + Op_##OPTION () : OpTernary (FEEDBACK, FLAGS & COMPLEX_MAPS_TO_REAL, FLAGS & REAL_MAPS_TO_COMPLEX) { } \ + complex_type R (real_type a, real_type b, real_type c) const REAL_OPERATION \ + complex_type Z (complex_type a, complex_type b, complex_type c) const COMPLEX_OPERATION \ + }; + +# elif SECTION == 3 // parsing section + +#define SECTION_TITLE(TITLE) + +#define UNARY_OP(OPTION,FEEDBACK,FLAGS,DESCRIPTION,REAL_OPERATION,COMPLEX_OPERATION) \ + else if (opt->is (#OPTION)) unary_operation (opt->id, stack, Op_##OPTION()); + +#define BINARY_OP(OPTION,FEEDBACK,FLAGS,DESCRIPTION,REAL_OPERATION,COMPLEX_OPERATION) \ + else if (opt->is (#OPTION)) binary_operation (opt->id, stack, Op_##OPTION()); + +#define TERNARY_OP(OPTION,FEEDBACK,FLAGS,DESCRIPTION,REAL_OPERATION,COMPLEX_OPERATION) \ + else if (opt->is (#OPTION)) ternary_operation (opt->id, stack, Op_##OPTION()); + +# endif + +#define NORMAL 0U +#define COMPLEX_MAPS_TO_REAL 1U +#define REAL_MAPS_TO_COMPLEX 2U +#define NOT_IMPLEMENTED { throw Exception ("operation not supported"); } + + + +/********************************************************************** + Operations defined below: + **********************************************************************/ + +SECTION_TITLE ("basic operations") +UNARY_OP (abs, "|%1|", COMPLEX_MAPS_TO_REAL, "return absolute value (magnitude) of real or complex number", { return abs (v); }, { return abs (v); } ) +UNARY_OP (neg, "-%1", NORMAL, "negative value", { return -v; }, { return -v; }) +BINARY_OP (add, "(%1 + %2)", NORMAL, "add values", { return a+b; }, { return a+b; }) +BINARY_OP (subtract, "(%1 - %2)", NORMAL, "subtract nth operand from (n-1)th", { return a-b; }, { return a-b; }) 
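/* Editorial note, not part of the original patch: the UNARY_OP / BINARY_OP / TERNARY_OP
 * entries in this table follow an "X-macro" pattern. The same operation table is expanded
 * in three different ways depending on the value of SECTION when this block is processed:
 * SECTION == 1 turns each entry into a command-line Option for the usage, SECTION == 2 into
 * an Op_* functor class, and SECTION == 3 into the corresponding option-parsing dispatch.
 * A minimal generic sketch of the technique (names here are illustrative, not mrcalc's):
 *
 *   #define OP_LIST \
 *     OP(add,      a + b) \
 *     OP(subtract, a - b)
 *
 *   // expansion 1: an enum naming each operation
 *   #define OP(NAME, EXPR) op_##NAME,
 *   enum op_id { OP_LIST };
 *   #undef OP
 *
 *   // expansion 2: an evaluation function for each operation
 *   #define OP(NAME, EXPR) double op_##NAME##_eval (double a, double b) { return EXPR; }
 *   OP_LIST
 *   #undef OP
 */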
+BINARY_OP (multiply, "(%1 * %2)", NORMAL, "multiply values", { return a*b; }, { return a*b; }) +BINARY_OP (divide, "(%1 / %2)", NORMAL, "divide (n-1)th operand by nth", { return a/b; }, { return a/b; }) +BINARY_OP (min, "min (%1, %2)", NORMAL, "smallest of last two operands", { return std::min (a, b); }, NOT_IMPLEMENTED) +BINARY_OP (max, "max (%1, %2)", NORMAL, "greatest of last two operands", { return std::max (a, b); }, NOT_IMPLEMENTED) + +SECTION_TITLE ("comparison operators") +BINARY_OP (lt, "(%1 < %2)", NORMAL, "less-than operator (true=1, false=0)", { return a < b; }, NOT_IMPLEMENTED) +BINARY_OP (gt, "(%1 > %2)", NORMAL, "greater-than operator (true=1, false=0)", { return a > b; }, NOT_IMPLEMENTED) +BINARY_OP (le, "(%1 <= %2)", NORMAL, "less-than-or-equal-to operator (true=1, false=0)", { return a <= b; }, NOT_IMPLEMENTED) +BINARY_OP (ge, "(%1 >= %2)", NORMAL, "greater-than-or-equal-to operator (true=1, false=0)", { return a >= b; }, NOT_IMPLEMENTED) +BINARY_OP (eq, "(%1 == %2)", COMPLEX_MAPS_TO_REAL, "equal-to operator (true=1, false=0)", { return a == b; }, { return a == b; }) +BINARY_OP (neq, "(%1 != %2)", COMPLEX_MAPS_TO_REAL, "not-equal-to operator (true=1, false=0)", { return a != b; }, { return a != b; }) + +SECTION_TITLE ("conditional operators") +TERNARY_OP (if, "(%1 ? %2 : %3)", NORMAL, "if first operand is true (non-zero), return second operand, otherwise return third operand", { return a ? b : c; }, { return is_true(a) ? b : c; }) +TERNARY_OP (replace, "(%1, %2 -> %3)", NORMAL, "Wherever first operand is equal to the second operand, replace with third operand", { return (a==b) ? c : a; }, { return (a==b) ? c : a; }) + +SECTION_TITLE ("power functions") +UNARY_OP (sqrt, "sqrt (%1)", NORMAL, "square root", { return std::sqrt (v); }, { return std::sqrt (v); }) +BINARY_OP (pow, "%1^%2", NORMAL, "raise (n-1)th operand to nth power", { return std::pow (a, b); }, { return std::pow (a, b); }) + +SECTION_TITLE ("nearest integer operations") +UNARY_OP (round, "round (%1)", NORMAL, "round to nearest integer", { return std::round (v); }, NOT_IMPLEMENTED) +UNARY_OP (ceil, "ceil (%1)", NORMAL, "round up to nearest integer", { return std::ceil (v); }, NOT_IMPLEMENTED) +UNARY_OP (floor, "floor (%1)", NORMAL, "round down to nearest integer", { return std::floor (v); }, NOT_IMPLEMENTED) + +SECTION_TITLE ("logical operators") +UNARY_OP (not, "!%1", NORMAL, "NOT operator: true (1) if operand is false (i.e. zero)", { return !v; }, { return !is_true (v); }) +BINARY_OP (and, "(%1 && %2)", NORMAL, "AND operator: true (1) if both operands are true (i.e. non-zero)", { return a && b; }, { return is_true(a) && is_true(b); }) +BINARY_OP (or, "(%1 || %2)", NORMAL, "OR operator: true (1) if either operand is true (i.e. non-zero)", { return a || b; }, { return is_true(a) || is_true(b); }) +BINARY_OP (xor, "(%1 ^^ %2)", NORMAL, "XOR operator: true (1) if only one of the operands is true (i.e. non-zero)", { return (!a) != (!b); }, { return is_true(a) != is_true(b); }) + +SECTION_TITLE ("classification functions") +UNARY_OP (isnan, "isnan (%1)", COMPLEX_MAPS_TO_REAL, "true (1) if operand is not-a-number (NaN)", { return std::isnan (v); }, { return std::isnan (v.real()) || std::isnan (v.imag()); }) +UNARY_OP (isinf, "isinf (%1)", COMPLEX_MAPS_TO_REAL, "true (1) if operand is infinite (Inf)", { return std::isinf (v); }, { return std::isinf (v.real()) || std::isinf (v.imag()); }) +UNARY_OP (finite, "finite (%1)", COMPLEX_MAPS_TO_REAL, "true (1) if operand is finite (i.e. 
not NaN or Inf)", { return std::isfinite (v); }, { return std::isfinite (v.real()) && std::isfinite (v.imag()); }) + +SECTION_TITLE ("complex numbers") +BINARY_OP (complex, "(%1 + %2 i)", REAL_MAPS_TO_COMPLEX, "create complex number using the last two operands as real,imaginary components", { return complex_type (a, b); }, NOT_IMPLEMENTED) +BINARY_OP (polar, "(%1 /_ %2)", REAL_MAPS_TO_COMPLEX, "create complex number using the last two operands as magnitude,phase components (phase in radians)", { return std::polar (a, b); }, NOT_IMPLEMENTED) +UNARY_OP (real, "real (%1)", COMPLEX_MAPS_TO_REAL, "real part of complex number", { return v; }, { return v.real(); }) +UNARY_OP (imag, "imag (%1)", COMPLEX_MAPS_TO_REAL, "imaginary part of complex number", { return 0.0; }, { return v.imag(); }) +UNARY_OP (phase, "phase (%1)", COMPLEX_MAPS_TO_REAL, "phase of complex number (use -abs for magnitude)", { return v < 0.0 ? Math::pi : 0.0; }, { return std::arg (v); }) +UNARY_OP (conj, "conj (%1)", NORMAL, "complex conjugate", { return v; }, { return std::conj (v); }) +UNARY_OP (proj, "proj (%1)", REAL_MAPS_TO_COMPLEX, "projection onto the Riemann sphere", { return std::proj (v); }, { return std::proj (v); }) + +SECTION_TITLE ("exponential functions") +UNARY_OP (exp, "exp (%1)", NORMAL, "exponential function", { return std::exp (v); }, { return std::exp (v); }) +UNARY_OP (log, "log (%1)", NORMAL, "natural logarithm", { return std::log (v); }, { return std::log (v); }) +UNARY_OP (log10, "log10 (%1)", NORMAL, "common logarithm", { return std::log10 (v); }, { return std::log10 (v); }) + +SECTION_TITLE ("trigonometric functions") +UNARY_OP (cos, "cos (%1)", NORMAL, "cosine", { return std::cos (v); }, { return std::cos (v); }) +UNARY_OP (sin, "sin (%1)", NORMAL, "sine", { return std::sin (v); }, { return std::sin (v); }) +UNARY_OP (tan, "tan (%1)", NORMAL, "tangent", { return std::tan (v); }, { return std::tan (v); }) +UNARY_OP (acos, "acos (%1)", NORMAL, "inverse cosine", { return std::acos (v); }, { return std::acos (v); }) +UNARY_OP (asin, "asin (%1)", NORMAL, "inverse sine", { return std::asin (v); }, { return std::asin (v); }) +UNARY_OP (atan, "atan (%1)", NORMAL, "inverse tangent", { return std::atan (v); }, { return std::atan (v); }) + +SECTION_TITLE ("hyperbolic functions") +UNARY_OP (cosh, "cosh (%1)", NORMAL, "hyperbolic cosine", { return std::cosh (v); }, { return std::cosh (v); }) +UNARY_OP (sinh, "sinh (%1)", NORMAL, "hyperbolic sine", { return std::sinh (v); }, { return std::sinh (v); }) +UNARY_OP (tanh, "tanh (%1)", NORMAL, "hyperbolic tangent", { return std::tanh (v); }, { return std::tanh (v); }) +UNARY_OP (acosh, "acosh (%1)", NORMAL, "inverse hyperbolic cosine", { return std::acosh (v); }, { return std::acosh (v); }) +UNARY_OP (asinh, "asinh (%1)", NORMAL, "inverse hyperbolic sine", { return std::asinh (v); }, { return std::asinh (v); }) +UNARY_OP (atanh, "atanh (%1)", NORMAL, "inverse hyperbolic tangent", { return std::atanh (v); }, { return std::atanh (v); }) + + + +#undef SECTION + +#else + + +/********************************************************************** + Main program + **********************************************************************/ + #include "command.h" #include "image.h" #include "memory.h" @@ -21,11 +187,17 @@ #include "algo/threaded_copy.h" #include "dwi/gradient.h" - using namespace MR; using namespace App; +using real_type = float; +using complex_type = cfloat; +static bool transform_mis_match_reported (false); + +inline bool is_true (const complex_type& z) { return 
z.real() || z.imag(); } + + void usage () { AUTHOR = "J-Donald Tournier (jdtournier@gmail.com)"; @@ -43,21 +215,7 @@ DESCRIPTION "images) are pushed onto the stack in the order they appear " "(as arguments) on the command-line, and operators (specified " "as options) operate on and consume the top-most entries in " - "the stack, and push their output as a new entry on the stack. " - "For example:" - - + " $ mrcalc a.mif 2 -mult r.mif" - - + "performs the operation r = 2*a for every voxel a,r in " - "images a.mif and r.mif respectively. Similarly:" - - + " $ mrcalc a.mif -neg b.mif -div -exp 9.3 -mult r.mif" - - + "performs the operation r = 9.3*exp(-a/b), and:" - - + " $ mrcalc a.mif b.mif -add c.mif d.mif -mult 4.2 -add -div r.mif" - - + "performs r = (a+b)/(c*d+4.2)." + "the stack, and push their output as a new entry on the stack." + "As an additional feature, this command will allow images with different " "dimensions to be processed, provided they satisfy the following " @@ -72,77 +230,42 @@ DESCRIPTION "volume consists of the 3D image scaled by the corresponding value for " "that volume in the single-voxel image."; +EXAMPLES + + Example ("Double the value stored in every voxel", + "mrcalc a.mif 2 -mult r.mif", + "This performs the operation: r = 2*a for every voxel a,r in " + "images a.mif and r.mif respectively.") + + + Example ("A more complex example", + "mrcalc a.mif -neg b.mif -div -exp 9.3 -mult r.mif", + "This performs the operation: r = 9.3*exp(-a/b)") + + + Example ("Another complex example", + "mrcalc a.mif b.mif -add c.mif d.mif -mult 4.2 -add -div r.mif", + "This performs: r = (a+b)/(c*d+4.2).") + + + Example ("Rescale the densities in a SH l=0 image", + "mrcalc ODF_CSF.mif 4 pi -mult -sqrt -div ODF_CSF_scaled.mif", + "This applies the spherical harmonic basis scaling factor: " + "1.0/sqrt(4*pi), such that a single-tissue voxel containing the " + "same intensities as the response function of that tissue " + "should contain the value 1.0."); + ARGUMENTS + Argument ("operand", "an input image, intensity value, or the special keywords " "'rand' (random number between 0 and 1) or 'randn' (random number from unit " - "std.dev. normal distribution).").type_various().allow_multiple(); + "std.dev. 
normal distribution) or the mathematical constants 'e' and 'pi'.").type_various().allow_multiple(); OPTIONS - + OptionGroup ("Unary operators") - - + Option ("abs", "absolute value").allow_multiple() - + Option ("neg", "negative value").allow_multiple() - + Option ("sqrt", "square root").allow_multiple() - + Option ("exp", "exponential function").allow_multiple() - + Option ("log", "natural logarithm").allow_multiple() - + Option ("log10", "common logarithm").allow_multiple() - + Option ("cos", "cosine").allow_multiple() - + Option ("sin", "sine").allow_multiple() - + Option ("tan", "tangent").allow_multiple() - + Option ("cosh", "hyperbolic cosine").allow_multiple() - + Option ("sinh", "hyperbolic sine").allow_multiple() - + Option ("tanh", "hyperbolic tangent").allow_multiple() - + Option ("acos", "inverse cosine").allow_multiple() - + Option ("asin", "inverse sine").allow_multiple() - + Option ("atan", "inverse tangent").allow_multiple() - + Option ("acosh", "inverse hyperbolic cosine").allow_multiple() - + Option ("asinh", "inverse hyperbolic sine").allow_multiple() - + Option ("atanh", "inverse hyperbolic tangent").allow_multiple() - + Option ("round", "round to nearest integer").allow_multiple() - + Option ("ceil", "round up to nearest integer").allow_multiple() - + Option ("floor", "round down to nearest integer").allow_multiple() - + Option ("isnan", "true (1) is operand is not-a-number (NaN)").allow_multiple() - + Option ("isinf", "true (1) is operand is infinite (Inf)").allow_multiple() - + Option ("finite", "true (1) is operand is finite (i.e. not NaN or Inf)").allow_multiple() - - + Option ("real", "real part of complex number").allow_multiple() - + Option ("imag", "imaginary part of complex number").allow_multiple() - + Option ("phase", "phase of complex number").allow_multiple() - + Option ("conj", "complex conjugate").allow_multiple() - - + OptionGroup ("Binary operators") - - + Option ("add", "add values").allow_multiple() - + Option ("subtract", "subtract nth operand from (n-1)th").allow_multiple() - + Option ("multiply", "multiply values").allow_multiple() - + Option ("divide", "divide (n-1)th operand by nth").allow_multiple() - + Option ("pow", "raise (n-1)th operand to nth power").allow_multiple() - + Option ("min", "smallest of last two operands").allow_multiple() - + Option ("max", "greatest of last two operands").allow_multiple() - + Option ("lt", "less-than operator (true=1, false=0)").allow_multiple() - + Option ("gt", "greater-than operator (true=1, false=0)").allow_multiple() - + Option ("le", "less-than-or-equal-to operator (true=1, false=0)").allow_multiple() - + Option ("ge", "greater-than-or-equal-to operator (true=1, false=0)").allow_multiple() - + Option ("eq", "equal-to operator (true=1, false=0)").allow_multiple() - + Option ("neq", "not-equal-to operator (true=1, false=0)").allow_multiple() - - + Option ("complex", "create complex number using the last two operands as real,imaginary components").allow_multiple() - - + OptionGroup ("Ternary operators") - - + Option ("if", "if first operand is true (non-zero), return second operand, otherwise return third operand").allow_multiple() - + Option ("replace", "Wherever first operand is equal to the second operand, replace with third operand").allow_multiple() +#define SECTION 1 +#include "mrcalc.cpp" + DataType::options(); } -using real_type = float; -using complex_type = cfloat; -static bool transform_mis_match_reported (false); - /********************************************************************** STACK 
FRAMEWORK: @@ -242,18 +365,23 @@ class StackEntry { NOMEMALIGN image.reset (new Image (header.get_image())); image_list.insert (std::make_pair (arg, LoadedImage (image, image_is_complex))); } - catch (Exception&) { + catch (Exception& e_image) { try { std::string a = lowercase (arg); - if (a == "nan") { value = std::numeric_limits::quiet_NaN(); } - else if (a == "-nan") { value = -std::numeric_limits::quiet_NaN(); } - else if (a == "inf") { value = std::numeric_limits::infinity(); } - else if (a == "-inf") { value = -std::numeric_limits::infinity(); } + if (a == "pi") { value = Math::pi; } + else if (a == "e") { value = Math::e; } else if (a == "rand") { value = 0.0; rng.reset (new Math::RNG()); rng_gaussian = false; } else if (a == "randn") { value = 0.0; rng.reset (new Math::RNG()); rng_gaussian = true; } else { value = to (arg); } - } catch (Exception&) { - throw Exception (std::string ("Could not interpret string \"") + arg + "\" as either an image path or a numerical value"); + } catch (Exception& e_number) { + Exception e (std::string ("Could not interpret string \"") + arg + "\" as either an image path or a numerical value"); + e.push_back ("As image: "); + for (size_t i = 0; i != e_image.num(); ++i) + e.push_back (e_image[i]); + e.push_back ("As numerical value: "); + for (size_t i = 0; i != e_number.num(); ++i) + e.push_back (e_number[i]); + throw e; } } } @@ -603,29 +731,7 @@ void get_header (const StackEntry& entry, Header& header) header.spacing(n) = entry.image->spacing(n); } - const auto header_grad = DWI::parse_DW_scheme (header); - if (header_grad.rows()) { - const auto entry_grad = DWI::parse_DW_scheme (*entry.image); - if (entry_grad.rows()) { - if (!entry_grad.isApprox (header_grad)) - DWI::clear_DW_scheme (header); - } - } - - const auto header_pe = PhaseEncoding::get_scheme (header); - if (header_pe.rows()) { - const auto entry_pe = PhaseEncoding::get_scheme (*entry.image); - if (entry_pe.rows()) { - if (!entry_pe.isApprox (header_pe)) - PhaseEncoding::clear_scheme (header); - } - } - - auto slice_encoding_it = entry.image->keyval().find ("SliceEncodingDirection"); - if (slice_encoding_it != entry.image->keyval().end()) { - if (header.keyval()["SliceEncodingDirection"] != slice_encoding_it->second) - header.keyval().erase (header.keyval().find ("SliceEncodingDirection")); - } + header.merge_keyval (*entry.image); } @@ -780,314 +886,12 @@ class OpTernary : public OpBase { NOMEMALIGN }; - /********************************************************************** - UNARY OPERATIONS: + EXPAND OPERATIONS: **********************************************************************/ -class OpAbs : public OpUnary { NOMEMALIGN - public: - OpAbs () : OpUnary ("|%1|", true) { } - complex_type R (real_type v) const { return abs (v); } - complex_type Z (complex_type v) const { return abs (v); } -}; - -class OpNeg : public OpUnary { NOMEMALIGN - public: - OpNeg () : OpUnary ("-%1") { } - complex_type R (real_type v) const { return -v; } - complex_type Z (complex_type v) const { return -v; } -}; - -class OpSqrt : public OpUnary { NOMEMALIGN - public: - OpSqrt () : OpUnary ("sqrt (%1)") { } - complex_type R (real_type v) const { return std::sqrt (v); } - complex_type Z (complex_type v) const { return std::sqrt (v); } -}; - -class OpExp : public OpUnary { NOMEMALIGN - public: - OpExp () : OpUnary ("exp (%1)") { } - complex_type R (real_type v) const { return std::exp (v); } - complex_type Z (complex_type v) const { return std::exp (v); } -}; - -class OpLog : public OpUnary { NOMEMALIGN - 
public: - OpLog () : OpUnary ("log (%1)") { } - complex_type R (real_type v) const { return std::log (v); } - complex_type Z (complex_type v) const { return std::log (v); } -}; - -class OpLog10 : public OpUnary { NOMEMALIGN - public: - OpLog10 () : OpUnary ("log10 (%1)") { } - complex_type R (real_type v) const { return std::log10 (v); } - complex_type Z (complex_type v) const { return std::log10 (v); } -}; - -class OpCos : public OpUnary { NOMEMALIGN - public: - OpCos () : OpUnary ("cos (%1)") { } - complex_type R (real_type v) const { return std::cos (v); } - complex_type Z (complex_type v) const { return std::cos (v); } -}; - -class OpSin : public OpUnary { NOMEMALIGN - public: - OpSin () : OpUnary ("sin (%1)") { } - complex_type R (real_type v) const { return std::sin (v); } - complex_type Z (complex_type v) const { return std::sin (v); } -}; - -class OpTan : public OpUnary { NOMEMALIGN - public: - OpTan () : OpUnary ("tan (%1)") { } - complex_type R (real_type v) const { return std::tan (v); } - complex_type Z (complex_type v) const { return std::tan (v); } -}; - -class OpCosh : public OpUnary { NOMEMALIGN - public: - OpCosh () : OpUnary ("cosh (%1)") { } - complex_type R (real_type v) const { return std::cosh (v); } - complex_type Z (complex_type v) const { return std::cosh (v); } -}; - -class OpSinh : public OpUnary { NOMEMALIGN - public: - OpSinh () : OpUnary ("sinh (%1)") { } - complex_type R (real_type v) const { return std::sinh (v); } - complex_type Z (complex_type v) const { return std::sinh (v); } -}; - -class OpTanh : public OpUnary { NOMEMALIGN - public: - OpTanh () : OpUnary ("tanh (%1)") { } - complex_type R (real_type v) const { return std::tanh (v); } - complex_type Z (complex_type v) const { return std::tanh (v); } -}; - -class OpAcos : public OpUnary { NOMEMALIGN - public: - OpAcos () : OpUnary ("acos (%1)") { } - complex_type R (real_type v) const { return std::acos (v); } -}; - -class OpAsin : public OpUnary { NOMEMALIGN - public: - OpAsin () : OpUnary ("asin (%1)") { } - complex_type R (real_type v) const { return std::asin (v); } -}; - -class OpAtan : public OpUnary { NOMEMALIGN - public: - OpAtan () : OpUnary ("atan (%1)") { } - complex_type R (real_type v) const { return std::atan (v); } -}; - -class OpAcosh : public OpUnary { NOMEMALIGN - public: - OpAcosh () : OpUnary ("acosh (%1)") { } - complex_type R (real_type v) const { return std::acosh (v); } -}; - -class OpAsinh : public OpUnary { NOMEMALIGN - public: - OpAsinh () : OpUnary ("asinh (%1)") { } - complex_type R (real_type v) const { return std::asinh (v); } -}; - -class OpAtanh : public OpUnary { NOMEMALIGN - public: - OpAtanh () : OpUnary ("atanh (%1)") { } - complex_type R (real_type v) const { return std::atanh (v); } -}; - - -class OpRound : public OpUnary { NOMEMALIGN - public: - OpRound () : OpUnary ("round (%1)") { } - complex_type R (real_type v) const { return std::round (v); } -}; - -class OpCeil : public OpUnary { NOMEMALIGN - public: - OpCeil () : OpUnary ("ceil (%1)") { } - complex_type R (real_type v) const { return std::ceil (v); } -}; - -class OpFloor : public OpUnary { NOMEMALIGN - public: - OpFloor () : OpUnary ("floor (%1)") { } - complex_type R (real_type v) const { return std::floor (v); } -}; - -class OpReal : public OpUnary { NOMEMALIGN - public: - OpReal () : OpUnary ("real (%1)", true) { } - complex_type Z (complex_type v) const { return v.real(); } -}; - -class OpImag : public OpUnary { NOMEMALIGN - public: - OpImag () : OpUnary ("imag (%1)", true) { } - complex_type Z 
(complex_type v) const { return v.imag(); } -}; - -class OpPhase : public OpUnary { NOMEMALIGN - public: - OpPhase () : OpUnary ("phase (%1)", true) { } - complex_type Z (complex_type v) const { return std::arg (v); } -}; - -class OpConj : public OpUnary { NOMEMALIGN - public: - OpConj () : OpUnary ("conj (%1)") { } - complex_type Z (complex_type v) const { return std::conj (v); } -}; - -class OpIsNaN : public OpUnary { NOMEMALIGN - public: - OpIsNaN () : OpUnary ("isnan (%1)", true, false) { } - complex_type R (real_type v) const { return std::isnan (v) != 0; } - complex_type Z (complex_type v) const { return std::isnan (v.real()) != 0 || std::isnan (v.imag()) != 0; } -}; - -class OpIsInf : public OpUnary { NOMEMALIGN - public: - OpIsInf () : OpUnary ("isinf (%1)", true, false) { } - complex_type R (real_type v) const { return std::isinf (v) != 0; } - complex_type Z (complex_type v) const { return std::isinf (v.real()) != 0 || std::isinf (v.imag()) != 0; } -}; - -class OpFinite : public OpUnary { NOMEMALIGN - public: - OpFinite () : OpUnary ("finite (%1)", true, false) { } - complex_type R (real_type v) const { return std::isfinite (v) != 0; } - complex_type Z (complex_type v) const { return std::isfinite (v.real()) != 0|| std::isfinite (v.imag()) != 0; } -}; - - -/********************************************************************** - BINARY OPERATIONS: -**********************************************************************/ - -class OpAdd : public OpBinary { NOMEMALIGN - public: - OpAdd () : OpBinary ("(%1 + %2)") { } - complex_type R (real_type a, real_type b) const { return a+b; } - complex_type Z (complex_type a, complex_type b) const { return a+b; } -}; - -class OpSubtract : public OpBinary { NOMEMALIGN - public: - OpSubtract () : OpBinary ("(%1 - %2)") { } - complex_type R (real_type a, real_type b) const { return a-b; } - complex_type Z (complex_type a, complex_type b) const { return a-b; } -}; - -class OpMultiply : public OpBinary { NOMEMALIGN - public: - OpMultiply () : OpBinary ("(%1 * %2)") { } - complex_type R (real_type a, real_type b) const { return a*b; } - complex_type Z (complex_type a, complex_type b) const { return a*b; } -}; - -class OpDivide : public OpBinary { NOMEMALIGN - public: - OpDivide () : OpBinary ("(%1 / %2)") { } - complex_type R (real_type a, real_type b) const { return a/b; } - complex_type Z (complex_type a, complex_type b) const { return a/b; } -}; - -class OpPow : public OpBinary { NOMEMALIGN - public: - OpPow () : OpBinary ("%1^%2") { } - complex_type R (real_type a, real_type b) const { return std::pow (a, b); } - complex_type Z (complex_type a, complex_type b) const { return std::pow (a, b); } -}; - -class OpMin : public OpBinary { NOMEMALIGN - public: - OpMin () : OpBinary ("min (%1, %2)") { } - complex_type R (real_type a, real_type b) const { return std::min (a, b); } -}; - -class OpMax : public OpBinary { NOMEMALIGN - public: - OpMax () : OpBinary ("max (%1, %2)") { } - complex_type R (real_type a, real_type b) const { return std::max (a, b); } -}; - -class OpLessThan : public OpBinary { NOMEMALIGN - public: - OpLessThan () : OpBinary ("(%1 < %2)") { } - complex_type R (real_type a, real_type b) const { return a < b; } -}; - -class OpGreaterThan : public OpBinary { NOMEMALIGN - public: - OpGreaterThan () : OpBinary ("(%1 > %2)") { } - complex_type R (real_type a, real_type b) const { return a > b; } -}; - -class OpLessThanOrEqual : public OpBinary { NOMEMALIGN - public: - OpLessThanOrEqual () : OpBinary ("(%1 <= %2)") { } - complex_type R 
(real_type a, real_type b) const { return a <= b; } -}; - -class OpGreaterThanOrEqual : public OpBinary { NOMEMALIGN - public: - OpGreaterThanOrEqual () : OpBinary ("(%1 >= %2)") { } - complex_type R (real_type a, real_type b) const { return a >= b; } -}; - -class OpEqual : public OpBinary { NOMEMALIGN - public: - OpEqual () : OpBinary ("(%1 == %2)", true) { } - complex_type R (real_type a, real_type b) const { return a == b; } - complex_type Z (complex_type a, complex_type b) const { return a == b; } -}; - -class OpNotEqual : public OpBinary { NOMEMALIGN - public: - OpNotEqual () : OpBinary ("(%1 != %2)", true) { } - complex_type R (real_type a, real_type b) const { return a != b; } - complex_type Z (complex_type a, complex_type b) const { return a != b; } -}; - -class OpComplex : public OpBinary { NOMEMALIGN - public: - OpComplex () : OpBinary ("(%1 + %2 i)", false, true) { } - complex_type R (real_type a, real_type b) const { return complex_type (a, b); } -}; - - - - - - - -/********************************************************************** - TERNARY OPERATIONS: -**********************************************************************/ - -class OpIf : public OpTernary { NOMEMALIGN - public: - OpIf () : OpTernary ("(%1 ? %2 : %3)") { } - complex_type R (real_type a, real_type b, real_type c) const { return a ? b : c; } - complex_type Z (complex_type a, complex_type b, complex_type c) const { return a.real() ? b : c; } -}; - -class OpReplace : public OpTernary { NOMEMALIGN - public: - OpReplace () : OpTernary ("(%1, %2 -> %3)") { } - complex_type R (real_type a, real_type b, real_type c) const { return ((a==b) || (std::isnan(a) && std::isnan(b))) ? c : a; } - complex_type Z (complex_type a, complex_type b, complex_type c) const { return (a==b) ? 
c : a; } -}; +#define SECTION 2 +#include "mrcalc.cpp" @@ -1102,67 +906,15 @@ void run () { const Option* opt = match_option (App::argv[n]); if (opt) { - if (opt->is ("abs")) unary_operation (opt->id, stack, OpAbs()); - else if (opt->is ("neg")) unary_operation (opt->id, stack, OpNeg()); - else if (opt->is ("sqrt")) unary_operation (opt->id, stack, OpSqrt()); - else if (opt->is ("exp")) unary_operation (opt->id, stack, OpExp()); - else if (opt->is ("log")) unary_operation (opt->id, stack, OpLog()); - else if (opt->is ("log10")) unary_operation (opt->id, stack, OpLog10()); - - else if (opt->is ("cos")) unary_operation (opt->id, stack, OpCos()); - else if (opt->is ("sin")) unary_operation (opt->id, stack, OpSin()); - else if (opt->is ("tan")) unary_operation (opt->id, stack, OpTan()); - - else if (opt->is ("cosh")) unary_operation (opt->id, stack, OpCosh()); - else if (opt->is ("sinh")) unary_operation (opt->id, stack, OpSinh()); - else if (opt->is ("tanh")) unary_operation (opt->id, stack, OpTanh()); - - else if (opt->is ("acos")) unary_operation (opt->id, stack, OpAcos()); - else if (opt->is ("asin")) unary_operation (opt->id, stack, OpAsin()); - else if (opt->is ("atan")) unary_operation (opt->id, stack, OpAtan()); - - else if (opt->is ("acosh")) unary_operation (opt->id, stack, OpAcosh()); - else if (opt->is ("asinh")) unary_operation (opt->id, stack, OpAsinh()); - else if (opt->is ("atanh")) unary_operation (opt->id, stack, OpAtanh()); - - else if (opt->is ("round")) unary_operation (opt->id, stack, OpRound()); - else if (opt->is ("ceil")) unary_operation (opt->id, stack, OpCeil()); - else if (opt->is ("floor")) unary_operation (opt->id, stack, OpFloor()); - - else if (opt->is ("real")) unary_operation (opt->id, stack, OpReal()); - else if (opt->is ("imag")) unary_operation (opt->id, stack, OpImag()); - else if (opt->is ("phase")) unary_operation (opt->id, stack, OpPhase()); - else if (opt->is ("conj")) unary_operation (opt->id, stack, OpConj()); - - else if (opt->is ("isnan")) unary_operation (opt->id, stack, OpIsNaN()); - else if (opt->is ("isinf")) unary_operation (opt->id, stack, OpIsInf()); - else if (opt->is ("finite")) unary_operation (opt->id, stack, OpFinite()); - - else if (opt->is ("add")) binary_operation (opt->id, stack, OpAdd()); - else if (opt->is ("subtract")) binary_operation (opt->id, stack, OpSubtract()); - else if (opt->is ("multiply")) binary_operation (opt->id, stack, OpMultiply()); - else if (opt->is ("divide")) binary_operation (opt->id, stack, OpDivide()); - else if (opt->is ("pow")) binary_operation (opt->id, stack, OpPow()); - - else if (opt->is ("min")) binary_operation (opt->id, stack, OpMin()); - else if (opt->is ("max")) binary_operation (opt->id, stack, OpMax()); - else if (opt->is ("lt")) binary_operation (opt->id, stack, OpLessThan()); - else if (opt->is ("gt")) binary_operation (opt->id, stack, OpGreaterThan()); - else if (opt->is ("le")) binary_operation (opt->id, stack, OpLessThanOrEqual()); - else if (opt->is ("ge")) binary_operation (opt->id, stack, OpGreaterThanOrEqual()); - else if (opt->is ("eq")) binary_operation (opt->id, stack, OpEqual()); - else if (opt->is ("neq")) binary_operation (opt->id, stack, OpNotEqual()); - - else if (opt->is ("complex")) binary_operation (opt->id, stack, OpComplex()); - - else if (opt->is ("if")) ternary_operation (opt->id, stack, OpIf()); - else if (opt->is ("replace")) ternary_operation (opt->id, stack, OpReplace()); - - else if (opt->is ("datatype")) ++n; + + if (opt->is ("datatype")) ++n; else if (opt->is 
("nthreads")) ++n; else if (opt->is ("force") || opt->is ("info") || opt->is ("debug") || opt->is ("quiet")) continue; +#define SECTION 3 +#include "mrcalc.cpp" + else throw Exception (std::string ("operation \"") + opt->id + "\" not yet implemented!"); @@ -1177,5 +929,5 @@ void run () { run_operations (stack); } - +#endif diff --git a/cmd/mrcat.cpp b/cmd/mrcat.cpp index a54785721f..55190a20c2 100644 --- a/cmd/mrcat.cpp +++ b/cmd/mrcat.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "algo/loop.h" @@ -40,6 +41,14 @@ ARGUMENTS + Argument ("output", "the output image.") .type_image_out (); +EXAMPLES + + Example ("Concatenate individual 3D volumes into a single 4D image series", + "mrcat volume*.mif series.mif", + "The wildcard characters will find all images in the current working directory with names that " + "begin with \"volume\" and end with \".mif\"; the mrcat command will receive these as a list of " + "input file names, from which it will produce a 4D image where the input volumes have been " + "concatenated along axis 3 (the fourth axis; the spatial axes are 0, 1 & 2)."); + OPTIONS + Option ("axis", "specify axis along which concatenation should be performed. By default, " @@ -56,10 +65,9 @@ OPTIONS template void write (vector
& in, const size_t axis, - Header& header_out, - const std::string& out_path) + Header& header_out) { - auto image_out = Image::create (out_path, header_out); + auto image_out = Image::create (header_out.name(), header_out); size_t axis_offset = 0; for (size_t i = 0; i != in.size(); i++) { @@ -86,155 +94,45 @@ void write (vector
& in, void run () { - int axis = get_option_value ("axis", -1); - - int num_images = argument.size()-1; - vector
in (num_images); - in[0] = Header::open (argument[0]); - - int ndims = 0; - int last_dim; - DataType datatype = in[0].datatype(); - DEBUG ("Datatype of first image (" + in[0].name() + "): " + datatype.specifier()); - - for (int i = 1; i < num_images; i++) { - in[i] = Header::open (argument[i]); - for (last_dim = in[i].ndim()-1; last_dim >= 0 && in[i].size (last_dim) <= 1; last_dim--); - if (last_dim > ndims) - ndims = last_dim; - DEBUG ("Datatype of image " + in[i].name() + ": " + in[i].datatype().specifier()); - if (in[i].datatype().is_complex()) - datatype.set_flag (DataType::Complex); - if (datatype.is_integer() && in[i].datatype().is_signed()) - datatype.set_flag (DataType::Signed); - if (in[i].datatype().is_floating_point() && datatype.is_integer()) - datatype = in[i].datatype(); - if (in[i].datatype().bytes() > datatype.bytes()) - datatype = (datatype() & DataType::Attributes) + (in[i].datatype()() & DataType::Type); + size_t num_images = argument.size()-1; + vector
headers; + ssize_t max_axis_nonunity = 0; + for (size_t i = 0; i != num_images; ++i) { + Header H = Header::open (argument[i]); + ssize_t a; + for (a = ssize_t(H.ndim())-1; a >= 0 && H.size (a) <= 1; a--); + max_axis_nonunity = std::max (max_axis_nonunity, a); + headers.push_back (std::move (H)); } - DEBUG (str("Output image datatype: ") + datatype.specifier()); - - if (axis < 0) axis = std::max (3, ndims); - ++ndims; - - for (int i = 0; i < ndims; i++) - if (i != axis) - for (int n = 0; n < num_images; n++) - if (in[0].size (i) != in[n].size (i)) - throw Exception ("dimensions of input images do not match"); - - if (axis >= ndims) ndims = axis+1; - - Header header_out (in[0]); - header_out.datatype() = DataType::from_command_line (datatype); - header_out.ndim() = ndims; - - for (size_t i = 0; i < header_out.ndim(); i++) { - if (header_out.size (i) <= 1) { - for (int n = 0; n < num_images; n++) { - if (in[n].ndim() > i) { - header_out.size(i) = in[n].size (i); - header_out.spacing(i) = in[n].spacing (i); - break; - } - } - } - } - - - size_t axis_dim = 0; - for (int n = 0; n < num_images; n++) - axis_dim += in[n].ndim() > size_t (axis) ? (in[n].size (axis) > 1 ? in[n].size (axis) : 1) : 1; - header_out.size (axis) = axis_dim; - - - if (axis > 2) { - // concatenate DW schemes - ssize_t nrows = 0, ncols = 0; - vector input_grads; - for (int n = 0; n < num_images; ++n) { - auto grad = DWI::get_DW_scheme (in[n]); - if (grad.rows() == 0 || grad.cols() < 4) { - nrows = 0; - break; - } - if (!ncols) { - ncols = grad.cols(); - } else if (grad.cols() != ncols) { - nrows = 0; - break; - } - nrows += grad.rows(); - input_grads.push_back (std::move (grad)); - } - if (nrows) { - Eigen::MatrixXd grad_out (nrows, 4); - int row = 0; - for (int n = 0; n < num_images; ++n) { - for (ssize_t i = 0; i < input_grads[n].rows(); ++i, ++row) - grad_out.row(row) = input_grads[n].row(i); - } - DWI::set_DW_scheme (header_out, grad_out); - } else { - header_out.keyval().erase ("dw_scheme"); - } - - // concatenate PE schemes - nrows = 0; ncols = 0; - vector input_schemes; - for (int n = 0; n != num_images; ++n) { - auto scheme = PhaseEncoding::parse_scheme (in[n]); - if (!scheme.rows()) { - nrows = 0; - break; - } - if (!ncols) { - ncols = scheme.cols(); - } else if (scheme.cols() != ncols) { - nrows = 0; - break; - } - nrows += scheme.rows(); - input_schemes.push_back (std::move (scheme)); - } - Eigen::MatrixXd scheme_out; - if (nrows) { - scheme_out.resize (nrows, ncols); - size_t row = 0; - for (int n = 0; n != num_images; ++n) { - for (ssize_t i = 0; i != input_schemes[n].rows(); ++i, ++row) - scheme_out.row(row) = input_schemes[n].row(i); - } - } - PhaseEncoding::set_scheme (header_out, scheme_out); - } - + const size_t axis = get_option_value ("axis", std::max (size_t(3), size_t(std::max (ssize_t(0), max_axis_nonunity)))); - const std::string out_path = argument[num_images]; + Header header_out = concatenate (headers, axis, true); + header_out.name() = std::string (argument[num_images]); + header_out.datatype() = DataType::from_command_line (header_out.datatype()); - if (header_out.intensity_offset() == 0.0 && header_out.intensity_scale() == 1.0 && !datatype.is_floating_point()) { - switch (datatype() & DataType::Type) { + if (header_out.intensity_offset() == 0.0 && header_out.intensity_scale() == 1.0 && !header_out.datatype().is_floating_point()) { + switch (header_out.datatype()() & DataType::Type) { case DataType::Bit: case DataType::UInt8: case DataType::UInt16: case DataType::UInt32: - if 
(datatype.is_signed()) - write (in, axis, header_out, out_path); + if (header_out.datatype().is_signed()) + write (headers, axis, header_out); else - write (in, axis, header_out, out_path); + write (headers, axis, header_out); break; case DataType::UInt64: - if (datatype.is_signed()) - write (in, axis, header_out, out_path); + if (header_out.datatype().is_signed()) + write (headers, axis, header_out); else - write (in, axis, header_out, out_path); + write (headers, axis, header_out); break; } } else { - if (datatype.is_complex()) - write (in, axis, header_out, out_path); + if (header_out.datatype().is_complex()) + write (headers, axis, header_out); else - write (in, axis, header_out, out_path); + write (headers, axis, header_out); } } diff --git a/cmd/mrcentroid.cpp b/cmd/mrcentroid.cpp new file mode 100644 index 0000000000..410f0fc5a8 --- /dev/null +++ b/cmd/mrcentroid.cpp @@ -0,0 +1,80 @@ +/* Copyright (c) 2008-2018 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. + */ + + +#include "command.h" +#include "image.h" +#include "image_helpers.h" +#include "transform.h" +#include "types.h" + +using namespace MR; +using namespace App; + + +void usage () +{ + AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)"; + + SYNOPSIS = "Determine the centre of mass / centre of gravity of an image"; + + ARGUMENTS + + Argument ("input", "the input image").type_image_in(); + + OPTIONS + + Option ("mask", "only include voxels within a mask in the calculation") + + Argument ("image").type_image_in() + + + Option ("voxelspace", "report image centre of mass in voxel space rather than scanner space"); + +} + + +typedef float value_type; + + +void run () +{ + Image image = Image::open (argument[0]); + if (image.ndim() > 3) + throw Exception ("Command does not accept images with more than 3 dimensions"); + + Image mask; + auto opt = get_options ("mask"); + if (opt.size()) { + mask = Image::open (opt[0][0]); + check_dimensions (image, mask); + } + + Eigen::Vector3 com (0.0, 0.0, 0.0); + default_type mass = 0.0; + if (mask.valid()) { + for (auto l = Loop(image) (image, mask); l; ++l) { + if (mask.value()) { + com += Eigen::Vector3 (image.index(0), image.index(1), image.index(2)) * image.value(); + mass += image.value(); + } + } + } else { + for (auto l = Loop(image) (image); l; ++l) { + com += Eigen::Vector3 (image.index(0), image.index(1), image.index(2)) * image.value(); + mass += image.value(); + } + } + + com /= mass; + if (!get_options ("voxelspace").size()) + com = image.transform() * com; + + std::cout << com.transpose(); +} diff --git a/cmd/mrcheckerboardmask.cpp b/cmd/mrcheckerboardmask.cpp index c990da0c39..24b34fe8d3 100644 --- a/cmd/mrcheckerboardmask.cpp +++ b/cmd/mrcheckerboardmask.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
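The new cmd/mrcentroid.cpp above reduces to a mass-weighted mean of voxel indices, optionally mapped through the image's voxel-to-scanner affine. As a rough standalone illustration of that arithmetic (plain Eigen with hypothetical names, not the MRtrix3 image classes):

// centroid_sketch.cpp -- illustrative only; assumes Eigen3 is available.
// Computes a mass-weighted mean of voxel indices, then optionally maps it
// through a voxel-to-scanner affine, mirroring the mrcentroid logic above.
#include <Eigen/Dense>
#include <iostream>
#include <vector>

struct Voxel { Eigen::Vector3d index; double value; };

Eigen::Vector3d centre_of_mass (const std::vector<Voxel>& voxels,
                                const Eigen::Affine3d* voxel2scanner = nullptr)
{
  Eigen::Vector3d com = Eigen::Vector3d::Zero();
  double mass = 0.0;
  for (const auto& v : voxels) {      // weighted sum of voxel positions
    com += v.index * v.value;
    mass += v.value;
  }
  com /= mass;                        // normalise by total intensity
  if (voxel2scanner)                  // optional: report in scanner space
    com = (*voxel2scanner) * com;
  return com;
}

int main ()
{
  std::vector<Voxel> voxels = { { Eigen::Vector3d (1, 2, 3), 2.0 },
                                { Eigen::Vector3d (3, 2, 1), 1.0 } };
  std::cout << centre_of_mass (voxels).transpose() << "\n";   // (5/3, 2, 7/3)
  return 0;
}

For the two example voxels the weighted mean is (5/3, 2, 7/3) in voxel coordinates; supplying an affine reports the same point in scanner coordinates, which corresponds to omitting the -voxelspace option described above.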
* - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "command.h" diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index e067d2b8a1..afbcb7a8ce 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -1,28 +1,32 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" -#include "file/path.h" -#include "algo/loop.h" #include "image.h" -#include "math/SH.h" -#include "dwi/directions/predefined.h" -#include "timer.h" +#include "types.h" + +#include "algo/loop.h" +#include "file/path.h" + +#include "math/stats/fwe.h" #include "math/stats/glm.h" -#include "math/stats/permutation.h" +#include "math/stats/import.h" +#include "math/stats/shuffle.h" #include "math/stats/typedefs.h" + #include "stats/cluster.h" #include "stats/enhance.h" #include "stats/permtest.h" @@ -32,12 +36,17 @@ using namespace MR; using namespace App; using namespace MR::Math::Stats; +using namespace MR::Math::Stats::GLM; + +using Stats::PermTest::count_matrix_type; #define DEFAULT_TFCE_DH 0.1 #define DEFAULT_TFCE_H 2.0 #define DEFAULT_TFCE_E 0.5 +#define DEFAULT_EMPIRICAL_SKEW 1.0 + void usage () { @@ -45,23 +54,26 @@ void usage () SYNOPSIS = "Voxel-based analysis using permutation testing and threshold-free cluster enhancement"; + DESCRIPTION + + Math::Stats::GLM::column_ones_description; + REFERENCES + "* If not using the -threshold command-line option:\n" "Smith, S. M. & Nichols, T. E. " "Threshold-free cluster enhancement: Addressing problems of smoothing, threshold dependence and localisation in cluster inference. " "NeuroImage, 2009, 44, 83-98" - + "* If using the -nonstationary option:" - "Salimi-Khorshidi, G. Smith, S.M. Nichols, T.E. Adjusting the effect of nonstationarity in cluster-based and TFCE inference. \n" - "Neuroimage, 2011, 54(3), 2006-19\n"; + + "* If using the -nonstationary option:\n" + "Salimi-Khorshidi, G. Smith, S.M. Nichols, T.E. 
Adjusting the effect of nonstationarity in cluster-based and TFCE inference. " + "Neuroimage, 2011, 54(3), 2006-19"; ARGUMENTS + Argument ("input", "a text file containing the file names of the input images, one file per line").type_file_in() - + Argument ("design", "the design matrix, rows should correspond with images in the input image text file").type_file_in() + + Argument ("design", "the design matrix").type_file_in() - + Argument ("contrast", "the contrast matrix, only specify one contrast as it will automatically compute the opposite contrast.").type_file_in() + + Argument ("contrast", "the contrast matrix").type_file_in() + Argument ("mask", "a mask used to define voxels included in the analysis.").type_image_in() @@ -69,18 +81,17 @@ void usage () OPTIONS - + Stats::PermTest::Options (true) + + Math::Stats::shuffle_options (true, DEFAULT_EMPIRICAL_SKEW) + Stats::TFCE::Options (DEFAULT_TFCE_DH, DEFAULT_TFCE_E, DEFAULT_TFCE_H) - + OptionGroup ("Additional options for mrclusterstats") + + Math::Stats::GLM::glm_options ("voxel") - + Option ("negative", "automatically test the negative (opposite) contrast. By computing the opposite contrast simultaneously " - "the computation time is reduced.") + + OptionGroup ("Additional options for mrclusterstats") + Option ("threshold", "the cluster-forming threshold to use for a standard cluster-based analysis. " "This disables TFCE, which is the default otherwise.") - + Argument ("value").type_float (1.0e-6) + + Argument ("value").type_float (1.0e-6) + Option ("connectivity", "use 26-voxel-neighbourhood connectivity (Default: 6)"); @@ -88,20 +99,83 @@ void usage () -template +using value_type = Stats::TFCE::value_type; + + + +template void write_output (const VectorType& data, - const vector >& mask_indices, - ImageType& image) { - for (size_t i = 0; i < mask_indices.size(); i++) { - for (size_t dim = 0; dim < image.ndim(); dim++) - image.index(dim) = mask_indices[i][dim]; + const Voxel2Vector& v2v, + const std::string& path, + const Header& header) { + auto image = Image::create (path, header); + for (size_t i = 0; i != v2v.size(); i++) { + assign_pos_of (v2v[i]).to (image); image.value() = data[i]; } } -using value_type = Stats::TFCE::value_type; +// Define data importer class that will obtain voxel data for a +// specific subject based on the string path to the image file for +// that subject +// +// The challenge with this mechanism for voxel data is that the +// class must know how to map data from voxels in 3D space into +// a 1D vector of data. This mapping must be done based on the +// analysis mask prior to the importing of any subject data. +// Moreover, in the case of voxel-wise design matrix columns, the +// class must have access to this mapping functionality without +// any modification of the class constructor (since these data +// are initialised in the CohortDataImport class). 
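The voxel-to-vector mapping this comment refers to (the Voxel2Vector lookup shared across the subject importers) is built once from the analysis mask: each voxel inside the mask is assigned a consecutive position in the 1D data vector, and every importer then reuses that same table to fill its row of the group-level data matrix. A simplified standalone sketch of the idea (hypothetical class and function names, not the actual MRtrix3 implementation):

// voxel2vector_sketch.cpp -- simplified illustration of a mask-driven
// voxel <-> vector index mapping used when importing subject images.
#include <array>
#include <cstddef>
#include <iostream>
#include <vector>

using std::size_t;
using Index3 = std::array<size_t, 3>;

class Voxel2VectorSketch {
  public:
    // Build the mapping once from a boolean mask (row-major 3D array).
    Voxel2VectorSketch (const std::vector<bool>& mask, const Index3& dims) {
      for (size_t z = 0; z != dims[2]; ++z)
        for (size_t y = 0; y != dims[1]; ++y)
          for (size_t x = 0; x != dims[0]; ++x)
            if (mask[x + dims[0] * (y + dims[1] * z)])
              lookup.push_back ({x, y, z});      // vector position -> voxel
    }
    size_t size () const { return lookup.size(); }
    const Index3& operator[] (size_t i) const { return lookup[i]; }
  private:
    std::vector<Index3> lookup;
};

// A subject "importer" then only needs the shared mapping to extract its
// masked voxel values in a consistent order.
std::vector<double> import_subject (const std::vector<double>& image,
                                    const Index3& dims,
                                    const Voxel2VectorSketch& v2v)
{
  std::vector<double> row (v2v.size());
  for (size_t i = 0; i != v2v.size(); ++i) {
    const auto& p = v2v[i];
    row[i] = image[p[0] + dims[0] * (p[1] + dims[1] * p[2])];
  }
  return row;
}

int main ()
{
  const Index3 dims = {2, 2, 1};
  const std::vector<bool> mask  = { true, false, false, true };
  const std::vector<double> img = { 3.0, 9.0, 9.0, 7.0 };
  Voxel2VectorSketch v2v (mask, dims);
  for (double v : import_subject (img, dims, v2v))
    std::cout << v << " ";                       // prints: 3 7
  std::cout << "\n";
  return 0;
}

Because the table is fixed by the mask before any subject data are read, the importer constructor never needs to know about it, which is the constraint the comment above describes.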
+// +class SubjectVoxelImport : public SubjectDataImportBase +{ MEMALIGN(SubjectVoxelImport) + public: + SubjectVoxelImport (const std::string& path) : + SubjectDataImportBase (path), + H (Header::open (path)), + data (H.get_image()) { } + + virtual ~SubjectVoxelImport() { } + + void operator() (matrix_type::RowXpr row) const override + { + assert (v2v); + Image temp (data); // For thread-safety + for (size_t i = 0; i != size(); ++i) { + assign_pos_of ((*v2v)[i]).to (temp); + row[i] = temp.value(); + } + } + + default_type operator[] (const size_t index) const override + { + assert (v2v); + Image temp (data); // For thread-safety + assign_pos_of ((*v2v)[index]).to (temp); + assert (!is_out_of_bounds (temp)); + return temp.value(); + } + + size_t size() const override { assert (v2v); return v2v->size(); } + + const Header& header() const { return H; } + + static void set_mapping (std::shared_ptr& ptr) { + v2v = ptr; + } + + private: + Header H; + const Image data; + + static std::shared_ptr v2v; + +}; +std::shared_ptr SubjectVoxelImport::v2v = nullptr; + @@ -112,89 +186,93 @@ void run() { const value_type tfce_H = get_option_value ("tfce_h", DEFAULT_TFCE_H); const value_type tfce_E = get_option_value ("tfce_e", DEFAULT_TFCE_E); const bool use_tfce = !std::isfinite (cluster_forming_threshold); - int num_perms = get_option_value ("nperms", DEFAULT_NUMBER_PERMUTATIONS); - int nperms_nonstationary = get_option_value ("nperms_nonstationary", DEFAULT_NUMBER_PERMUTATIONS_NONSTATIONARITY); - const bool do_26_connectivity = get_options("connectivity").size(); - const bool do_nonstationary_adjustment = get_options ("nonstationary").size(); + const bool do_nonstationarity_adjustment = get_options ("nonstationarity").size(); + const default_type empirical_skew = get_option_value ("skew_nonstationarity", DEFAULT_EMPIRICAL_SKEW); - // Read filenames - vector subjects; - { - std::string folder = Path::dirname (argument[0]); - std::ifstream ifs (argument[0].c_str()); - std::string temp; - while (getline (ifs, temp)) - subjects.push_back (Path::join (folder, temp)); + // Load analysis mask and compute adjacency + auto mask_header = Header::open (argument[3]); + check_effective_dimensionality (mask_header, 3); + auto mask_image = mask_header.get_image(); + std::shared_ptr v2v = make_shared (mask_image, mask_header); + SubjectVoxelImport::set_mapping (v2v); + Filter::Connector connector; + connector.adjacency.set_26_adjacency (do_26_connectivity); + connector.adjacency.initialise (mask_header, *v2v); + const size_t num_voxels = v2v->size(); + + // Read file names and check files exist + CohortDataImport importer; + importer.initialise (argument[0]); + for (size_t i = 0; i != importer.size(); ++i) { + if (!dimensions_match (dynamic_cast(importer[i].get())->header(), mask_header)) + throw Exception ("Image file \"" + importer[i]->name() + "\" does not match analysis mask"); } + CONSOLE ("Number of inputs: " + str(importer.size())); // Load design matrix const matrix_type design = load_matrix (argument[1]); - if (design.rows() != (ssize_t)subjects.size()) - throw Exception ("number of input files does not match number of rows in design matrix"); - - // Load permutations file if supplied - auto opt = get_options("permutations"); - vector > permutations; - if (opt.size()) { - permutations = Math::Stats::Permutation::load_permutations_file (opt[0][0]); - num_perms = permutations.size(); - if (permutations[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the permutations file (" + 
str(opt[0][0]) + ") does not match number of rows in design matrix"); + if (design.rows() != (ssize_t)importer.size()) + throw Exception ("Number of input files does not match number of rows in design matrix"); + + // Before validating the contrast matrix, we first need to see if there are any + // additional design matrix columns coming from voxel-wise subject data + // TODO Functionalise this + vector extra_columns; + bool nans_in_columns = false; + auto opt = get_options ("column"); + for (size_t i = 0; i != opt.size(); ++i) { + extra_columns.push_back (CohortDataImport()); + extra_columns[i].initialise (opt[i][0]); + if (!extra_columns[i].allFinite()) + nans_in_columns = true; } - - // Load non-stationary correction permutations file if supplied - opt = get_options("permutations_nonstationary"); - vector > permutations_nonstationary; - if (opt.size()) { - permutations_nonstationary = Math::Stats::Permutation::load_permutations_file (opt[0][0]); - nperms_nonstationary = permutations.size(); - if (permutations_nonstationary[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the nonstationary permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); + const ssize_t num_factors = design.cols() + extra_columns.size(); + CONSOLE ("Number of factors: " + str(num_factors)); + if (extra_columns.size()) { + CONSOLE ("Number of element-wise design matrix columns: " + str(extra_columns.size())); + if (nans_in_columns) + CONSOLE ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from voxel-wise design matrices accordingly"); } - - // Load contrast matrix - const matrix_type contrast = load_matrix (argument[2]); - if (contrast.cols() != design.cols()) - throw Exception ("the number of contrasts does not equal the number of columns in the design matrix"); - - auto mask_header = Header::open (argument[3]); - if (!(mask_header.ndim() == 3 || (mask_header.ndim() == 4 && mask_header.size(3) == 1))) - throw Exception ("Template mask is not a 3D image"); - // Load Mask and compute adjacency - auto mask_image = mask_header.get_image(); - Filter::Connector connector (do_26_connectivity); - vector > mask_indices = connector.precompute_adjacency (mask_image); - const size_t num_vox = mask_indices.size(); - - matrix_type data (num_vox, subjects.size()); - + check_design (design, extra_columns.size()); + + // Load variance groups + auto variance_groups = GLM::load_variance_groups (design.rows()); + const size_t num_vgs = variance_groups.size() ? variance_groups.maxCoeff()+1 : 1; + if (num_vgs > 1) + CONSOLE ("Number of variance groups: " + str(num_vgs)); + + // Load hypotheses + const vector hypotheses = Math::Stats::GLM::load_hypotheses (argument[2]); + const size_t num_hypotheses = hypotheses.size(); + if (hypotheses[0].cols() != num_factors) + throw Exception ("The number of columns in the contrast matrix (" + str(hypotheses[0].cols()) + ")" + + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + + (extra_columns.size() ? 
" (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); + CONSOLE ("Number of hypotheses: " + str(num_hypotheses)); + + matrix_type data (importer.size(), num_voxels); { // Load images - ProgressBar progress("loading images", subjects.size()); - for (size_t subject = 0; subject < subjects.size(); subject++) { - LogLevelLatch log_level (0); - auto input_image = Image::open (subjects[subject]); - check_dimensions (input_image, mask_image, 0, 3); - int index = 0; - vector >::iterator it; - for (it = mask_indices.begin(); it != mask_indices.end(); ++it) { - input_image.index(0) = (*it)[0]; - input_image.index(1) = (*it)[1]; - input_image.index(2) = (*it)[2]; - data (index++, subject) = input_image.value(); - } + ProgressBar progress ("loading input images", importer.size()); + for (size_t subject = 0; subject < importer.size(); subject++) { + (*importer[subject]) (data.row (subject)); progress++; } } - if (!data.allFinite()) - WARN ("input data contains non-finite value(s)"); + const bool nans_in_data = !data.allFinite(); + if (nans_in_data) { + INFO ("Non-finite values present in data; rows will be removed from voxel-wise design matrices accordingly"); + if (!extra_columns.size()) { + INFO ("(Note that this will result in slower execution than if such values were not present)"); + } + } Header output_header (mask_header); output_header.datatype() = DataType::Float32; - output_header.keyval()["num permutations"] = str(num_perms); + //output_header.keyval()["num permutations"] = str(num_perms); output_header.keyval()["26 connectivity"] = str(do_26_connectivity); - output_header.keyval()["nonstationary adjustment"] = str(do_nonstationary_adjustment); + output_header.keyval()["nonstationary adjustment"] = str(do_nonstationarity_adjustment); if (use_tfce) { output_header.keyval()["tfce_dh"] = str(tfce_dh); output_header.keyval()["tfce_e"] = str(tfce_E); @@ -204,16 +282,63 @@ void run() { } const std::string prefix (argument[4]); - bool compute_negative_contrast = get_options("negative").size(); - vector_type default_cluster_output (num_vox); - std::shared_ptr default_cluster_output_neg; - vector_type tvalue_output (num_vox); - vector_type empirical_enhanced_statistic; - if (compute_negative_contrast) - default_cluster_output_neg.reset (new vector_type (num_vox)); + // Only add contrast matrix row number to image outputs if there's more than one hypothesis + auto postfix = [&] (const size_t i) { return (num_hypotheses > 1) ? ("_" + hypotheses[i].name()) : ""; }; - Math::Stats::GLMTTest glm (data, design, contrast); + { + matrix_type betas (num_factors, num_voxels); + matrix_type abs_effect_size (num_voxels, num_hypotheses); + matrix_type std_effect_size (num_voxels, num_hypotheses); + matrix_type stdev (num_vgs, num_voxels); + vector_type cond (num_voxels); + + Math::Stats::GLM::all_stats (data, design, extra_columns, hypotheses, variance_groups, + cond, betas, abs_effect_size, std_effect_size, stdev); + + ProgressBar progress ("Outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_hypotheses) + num_vgs + (nans_in_data || extra_columns.size() ? 
1 : 0)); + for (ssize_t i = 0; i != num_factors; ++i) { + write_output (betas.row(i), *v2v, prefix + "beta" + str(i) + ".mif", output_header); + ++progress; + } + for (size_t i = 0; i != num_hypotheses; ++i) { + if (!hypotheses[i].is_F()) { + write_output (abs_effect_size.col(i), *v2v, prefix + "abs_effect" + postfix(i) + ".mif", output_header); + ++progress; + if (num_vgs == 1) + write_output (std_effect_size.col(i), *v2v, prefix + "std_effect" + postfix(i) + ".mif", output_header); + } else { + ++progress; + } + ++progress; + } + if (nans_in_data || extra_columns.size()) { + write_output (cond, *v2v, prefix + "cond.mif", output_header); + ++progress; + } + if (num_vgs == 1) { + write_output (stdev.row(0), *v2v, prefix + "std_dev.mif", output_header); + } else { + for (size_t i = 0; i != num_vgs; ++i) { + write_output (stdev.row(i), *v2v, prefix + "std_dev.mif", output_header); + ++progress; + } + } + } + + // Construct the class for performing the initial statistical tests + std::shared_ptr glm_test; + if (extra_columns.size() || nans_in_data) { + if (variance_groups.size()) + glm_test.reset (new GLM::TestVariableHeteroscedastic (extra_columns, data, design, hypotheses, variance_groups, nans_in_data, nans_in_columns)); + else + glm_test.reset (new GLM::TestVariableHomoscedastic (extra_columns, data, design, hypotheses, nans_in_data, nans_in_columns)); + } else { + if (variance_groups.size()) + glm_test.reset (new GLM::TestFixedHeteroscedastic (data, design, hypotheses, variance_groups)); + else + glm_test.reset (new GLM::TestFixedHomoscedastic (data, design, hypotheses)); + } std::shared_ptr enhancer; if (use_tfce) { @@ -223,121 +348,59 @@ void run() { enhancer.reset (new Stats::Cluster::ClusterSize (connector, cluster_forming_threshold)); } - if (do_nonstationary_adjustment) { + matrix_type empirical_enhanced_statistic; + if (do_nonstationarity_adjustment) { if (!use_tfce) - throw Exception ("nonstationary adjustment is not currently implemented for threshold-based cluster analysis"); - empirical_enhanced_statistic = vector_type::Zero (num_vox); - if (permutations_nonstationary.size()) { - Stats::PermTest::PermutationStack permutations (permutations_nonstationary, "precomputing empirical statistic for non-stationarity adjustment..."); - Stats::PermTest::precompute_empirical_stat (glm, enhancer, permutations, empirical_enhanced_statistic); - } else { - Stats::PermTest::PermutationStack permutations (nperms_nonstationary, design.rows(), "precomputing empirical statistic for non-stationarity adjustment...", false); - Stats::PermTest::precompute_empirical_stat (glm, enhancer, permutations, empirical_enhanced_statistic); - } + throw Exception ("Nonstationarity adjustment is not currently implemented for threshold-based cluster analysis"); + Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, empirical_skew, empirical_enhanced_statistic); + for (size_t i = 0; i != num_hypotheses; ++i) + write_output (empirical_enhanced_statistic.col(i), *v2v, prefix + "empirical" + postfix(i) + ".mif", output_header); + } - save_matrix (empirical_enhanced_statistic, prefix + "empirical.txt"); + // Precompute statistic value and enhanced statistic for the default permutation + matrix_type default_statistic, default_zstat, default_enhanced; + Stats::PermTest::precompute_default_permutation (glm_test, enhancer, empirical_enhanced_statistic, default_statistic, default_zstat, default_enhanced); + for (size_t i = 0; i != num_hypotheses; ++i) { + write_output (default_statistic.col (i), *v2v, prefix + 
(hypotheses[i].is_F() ? "F" : "t") + "value" + postfix(i) + ".mif", output_header); + write_output (default_zstat .col (i), *v2v, prefix + "Zstat" + postfix(i) + ".mif", output_header); + write_output (default_enhanced .col (i), *v2v, prefix + (use_tfce ? "tfce" : "clustersize") + postfix(i) + ".mif", output_header); } - Stats::PermTest::precompute_default_permutation (glm, enhancer, empirical_enhanced_statistic, - default_cluster_output, default_cluster_output_neg, tvalue_output); + if (!get_options ("notest").size()) { - { - ProgressBar progress ("generating pre-permutation output", (compute_negative_contrast ? 3 : 2) + contrast.cols() + 3); - { - auto tvalue_image = Image::create (prefix + "tvalue.mif", output_header); - write_output (tvalue_output, mask_indices, tvalue_image); - } - ++progress; - { - auto cluster_image = Image::create (prefix + (use_tfce ? "tfce.mif" : "cluster_sizes.mif"), output_header); - write_output (default_cluster_output, mask_indices, cluster_image); - } - ++progress; - if (compute_negative_contrast) { - assert (default_cluster_output_neg); - auto cluster_image_neg = Image::create (prefix + (use_tfce ? "tfce_neg.mif" : "cluster_sizes_neg.mif"), output_header); - write_output (*default_cluster_output_neg, mask_indices, cluster_image_neg); - ++progress; - } - auto temp = Math::Stats::GLM::solve_betas (data, design); - for (ssize_t i = 0; i < contrast.cols(); ++i) { - auto beta_image = Image::create (prefix + "beta" + str(i) + ".mif", output_header); - write_output (temp.row(i), mask_indices, beta_image); - ++progress; + const bool fwe_strong = get_option_value ("strong", false); + if (fwe_strong && num_hypotheses == 1) { + WARN("Option -strong has no effect when testing a single hypothesis only"); } - { - const auto temp = Math::Stats::GLM::abs_effect_size (data, design, contrast); - auto abs_effect_image = Image::create (prefix + "abs_effect.mif", output_header); - write_output (temp.row(0), mask_indices, abs_effect_image); - } - ++progress; - { - const auto temp = Math::Stats::GLM::std_effect_size (data, design, contrast); - auto std_effect_image = Image::create (prefix + "std_effect.mif", output_header); - write_output (temp.row(0), mask_indices, std_effect_image); - } - ++progress; - { - const auto temp = Math::Stats::GLM::stdev (data, design); - auto std_dev_image = Image::create (prefix + "std_dev.mif", output_header); - write_output (temp.row(0), mask_indices, std_dev_image); - } - } - if (!get_options ("notest").size()) { + matrix_type null_distribution, uncorrected_pvalue; + count_matrix_type null_contributions; - vector_type perm_distribution (num_perms); - std::shared_ptr perm_distribution_neg; - vector_type uncorrected_pvalue (num_vox); - std::shared_ptr uncorrected_pvalue_neg; + Stats::PermTest::run_permutations (glm_test, enhancer, empirical_enhanced_statistic, default_enhanced, fwe_strong, + null_distribution, null_contributions, uncorrected_pvalue); - if (compute_negative_contrast) { - perm_distribution_neg.reset (new vector_type (num_perms)); - uncorrected_pvalue_neg.reset (new vector_type (num_vox)); - } + ProgressBar progress ("Outputting final results", (fwe_strong ? 
1 : num_hypotheses) + 1 + 3*num_hypotheses); - if (permutations.size()) { - Stats::PermTest::run_permutations (permutations, glm, enhancer, empirical_enhanced_statistic, - default_cluster_output, default_cluster_output_neg, - perm_distribution, perm_distribution_neg, - uncorrected_pvalue, uncorrected_pvalue_neg); + if (fwe_strong) { + save_vector (null_distribution.col(0), prefix + "null_dist.txt"); + ++progress; } else { - Stats::PermTest::run_permutations (num_perms, glm, enhancer, empirical_enhanced_statistic, - default_cluster_output, default_cluster_output_neg, - perm_distribution, perm_distribution_neg, - uncorrected_pvalue, uncorrected_pvalue_neg); - } - - save_matrix (perm_distribution, prefix + "perm_dist.txt"); - if (compute_negative_contrast) { - assert (perm_distribution_neg); - save_matrix (*perm_distribution_neg, prefix + "perm_dist_neg.txt"); + for (size_t i = 0; i != num_hypotheses; ++i) { + save_vector (null_distribution.col(i), prefix + "null_dist" + postfix(i) + ".txt"); + ++progress; + } } - ProgressBar progress ("generating output", compute_negative_contrast ? 4 : 2); - { - auto uncorrected_pvalue_image = Image::create (prefix + "uncorrected_pvalue.mif", output_header); - write_output (uncorrected_pvalue, mask_indices, uncorrected_pvalue_image); - } - ++progress; - { - vector_type fwe_pvalue_output (num_vox); - Math::Stats::Permutation::statistic2pvalue (perm_distribution, default_cluster_output, fwe_pvalue_output); - auto fwe_pvalue_image = Image::create (prefix + "fwe_pvalue.mif", output_header); - write_output (fwe_pvalue_output, mask_indices, fwe_pvalue_image); - } + const matrix_type fwe_pvalue_output = MR::Math::Stats::fwe_pvalue (null_distribution, default_enhanced); ++progress; - if (compute_negative_contrast) { - assert (uncorrected_pvalue_neg); - assert (perm_distribution_neg); - auto uncorrected_pvalue_image_neg = Image::create (prefix + "uncorrected_pvalue_neg.mif", output_header); - write_output (*uncorrected_pvalue_neg, mask_indices, uncorrected_pvalue_image_neg); + for (size_t i = 0; i != num_hypotheses; ++i) { + write_output (fwe_pvalue_output.col(i), *v2v, prefix + "fwe_1mpvalue" + postfix(i) + ".mif", output_header); + ++progress; + write_output (uncorrected_pvalue.col(i), *v2v, prefix + "uncorrected_pvalue" + postfix(i) + ".mif", output_header); + ++progress; + write_output (null_contributions.col(i), *v2v, prefix + "null_contributions" + postfix(i) + ".mif", output_header); ++progress; - vector_type fwe_pvalue_output_neg (num_vox); - Math::Stats::Permutation::statistic2pvalue (*perm_distribution_neg, *default_cluster_output_neg, fwe_pvalue_output_neg); - auto fwe_pvalue_image_neg = Image::create (prefix + "fwe_pvalue_neg.mif", output_header); - write_output (fwe_pvalue_output_neg, mask_indices, fwe_pvalue_image_neg); } - } + } } diff --git a/cmd/mrcolour.cpp b/cmd/mrcolour.cpp new file mode 100644 index 0000000000..680a645e2b --- /dev/null +++ b/cmd/mrcolour.cpp @@ -0,0 +1,168 @@ +/* Copyright (c) 2008-2019 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. 
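The permutation block above collects, for each hypothesis, a null distribution of the maximum enhanced statistic across permutations, and the observed enhanced statistic is then converted into a family-wise-error-corrected value (written out as "fwe_1mpvalue", i.e. one minus the corrected p-value). A minimal stand-alone sketch of that conversion, using plain std::vector in place of MRtrix3's matrix_type and a hypothetical function name (this is not the actual MR::Math::Stats::fwe_pvalue() implementation):

#include <algorithm>
#include <cstddef>
#include <vector>

// For each observed enhanced statistic, report the fraction of permutation
// maxima that fall strictly below it; higher values indicate stronger
// family-wise significance (1 - p). Illustrative sketch only.
std::vector<double> fwe_one_minus_p (const std::vector<double>& null_maxima,
                                     const std::vector<double>& observed)
{
  std::vector<double> sorted_null (null_maxima);
  std::sort (sorted_null.begin(), sorted_null.end());
  std::vector<double> result (observed.size());
  for (std::size_t i = 0; i != observed.size(); ++i) {
    const std::size_t below = std::lower_bound (sorted_null.begin(), sorted_null.end(), observed[i])
                              - sorted_null.begin();
    result[i] = double(below) / double(sorted_null.size());
  }
  return result;
}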
+ * See the Mozilla Public License v. 2.0 for more details. + * + * For more details, see http://www.mrtrix.org/. + */ + +#include "command.h" +#include "datatype.h" +#include "header.h" +#include "image.h" +#include "mrtrix.h" +#include "stride.h" +#include "types.h" + +#include "algo/loop.h" +#include "algo/min_max.h" +#include "colourmap.h" + +using namespace MR; +using namespace App; + + +vector colourmap_choices_std; +vector colourmap_choices_cstr; + +void usage () +{ + + const ColourMap::Entry* entry = ColourMap::maps; + do { + if (strcmp(entry->name, "Complex")) + colourmap_choices_std.push_back (lowercase (entry->name)); + ++entry; + } while (entry->name); + colourmap_choices_cstr.reserve (colourmap_choices_std.size() + 1); + for (const auto& s : colourmap_choices_std) + colourmap_choices_cstr.push_back (s.c_str()); + colourmap_choices_cstr.push_back (nullptr); + + AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)"; + + SYNOPSIS = "Apply a colour map to an image"; + + DESCRIPTION + + "Under typical usage, this command will receive as input ad 3D greyscale image, and " + "output a 4D image with 3 volumes corresponding to red-green-blue components; " + "other use cases are possible, and are described in more detail below." + + + "By default, the command will automatically determine the maximum and minimum " + "intensities of the input image, and use that information to set the upper and " + "lower bounds of the applied colourmap. This behaviour can be overridden by manually " + "specifying these bounds using the -upper and -lower options respectively."; + + ARGUMENTS + + Argument ("input", "the input image").type_image_in() + + Argument ("map", "the colourmap to apply; choices are: " + join(colourmap_choices_std, ",")).type_choice (colourmap_choices_cstr.data()) + + Argument ("output", "the output image").type_image_out(); + + OPTIONS + + Option ("upper", "manually set the upper intensity of the colour mapping") + + Argument ("value").type_float() + + + Option ("lower", "manually set the lower intensity of the colour mapping") + + Argument ("value").type_float() + + + Option ("colour", "set the target colour for use of the 'colour' map (three comma-separated floating-point values)") + + Argument ("values").type_sequence_float(); + +} + + + + + +void run () +{ + Header H_in = Header::open (argument[0]); + const ColourMap::Entry colourmap = ColourMap::maps[argument[1]]; + Eigen::Vector3 fixed_colour (NaN, NaN, NaN); + if (colourmap.is_colour) { + if (!(H_in.ndim() == 3 || (H_in.ndim() == 4 && H_in.size(3) == 1))) + throw Exception ("For applying a fixed colour, command expects a 3D image as input"); + auto opt = get_options ("colour"); + if (!opt.size()) + throw Exception ("For \'colour\' colourmap, target colour must be specified using the -colour option"); + const auto values = parse_floats (opt[0][0]); + if (values.size() != 3) + throw Exception ("Target colour must be specified as a comma-separated list of three values"); + fixed_colour = Eigen::Vector3 (values.data()); + if (fixed_colour.minCoeff() < 0.0) + throw Exception ("Values for fixed colour provided via -colour option cannot be negative"); + } else if (colourmap.is_rgb) { + if (!(H_in.ndim() == 4 && H_in.size(3) == 3)) + throw Exception ("\'rgb\' colourmap only applies to 4D images with 3 volumes"); + if (get_options ("lower").size()) { + WARN ("Option -lower ignored: not compatible with \'rgb\' colourmap (a minimum of 0.0 is assumed)"); + } + } else { + if (!(H_in.ndim() == 3 || (H_in.ndim() == 4 && H_in.size(3) == 
1))) + throw Exception ("For standard colour maps, command expects a 3D image as input"); + if (get_options ("colour").size()) { + WARN ("Option -colour ignored: only applies if \'colour\' colourmap is used"); + } + } + + auto in = H_in.get_image(); + float lower = int(argument[1]) == 6 ? 0.0 : get_option_value ("lower", NaN); + float upper = get_option_value ("upper", NaN); + if (!std::isfinite (lower) || !std::isfinite (upper)) { + float image_min = NaN, image_max = NaN; + min_max (in, image_min, image_max); + if (int(argument[1]) == 6) { // RGB + image_max = std::max (MR::abs (image_min), MR::abs (image_max)); + } else { + if (!std::isfinite (lower)) { + if (!std::isfinite (image_min)) + throw Exception ("Unable to determine minimum value from image"); + lower = image_min; + } + } + if (!std::isfinite (upper)) { + if (!std::isfinite (image_max)) + throw Exception ("Unable to determine maximum value from image"); + upper = image_max; + } + } + const float multiplier = 1.0f / (upper - lower); + + auto scale = [&] (const float value) { return std::max (0.0f, std::min (1.0f, multiplier * (value - lower))); }; + + Header H_out (H_in); + H_out.ndim() = 4; + H_out.size(3) = 3; + Stride::set (H_out, Stride::contiguous_along_axis (3, H_out)); + H_out.datatype() = DataType::Float32; + H_out.datatype().set_byte_order_native(); + auto out = Image::create (argument[2], H_out); + + + if (colourmap.is_colour) { + assert (fixed_colour.allFinite()); + for (auto l_outer = Loop ("Applying fixed RGB colour to greyscale image", H_in) (in, out); l_outer; ++l_outer) { + const float amplitude = std::max (0.0f, std::min (1.0f, scale (in.value()))); + for (auto l_inner = Loop(3) (out); l_inner; ++l_inner) + out.value() = amplitude * fixed_colour[out.index(3)]; + } + } else if (colourmap.is_rgb) { + for (auto l_outer = Loop ("Scaling RGB colour image", H_in) (in, out); l_outer; ++l_outer) + out.value() = scale (in.value()); + } else { + const ColourMap::Entry::basic_map_fn& map_fn = colourmap.basic_mapping; + for (auto l_outer = Loop ("Mapping intensities to RGB colours", H_in) (in, out); l_outer; ++l_outer) { + const Eigen::Array3f colour = map_fn (scale (in.value())); + for (auto l_inner = Loop(3) (out); l_inner; ++l_inner) + out.value() = colour[out.index(3)]; + } + } +} diff --git a/cmd/mrconvert.cpp b/cmd/mrconvert.cpp index 2ebc65250e..4e02e44bc9 100644 --- a/cmd/mrconvert.cpp +++ b/cmd/mrconvert.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
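The run() function of mrcolour above maps every input intensity linearly onto [0, 1] between the lower and upper bounds (auto-detected from the image or supplied via -lower/-upper) before the colourmap is applied; this is the role of the 'scale' lambda. A self-contained restatement of that normalisation, with plain floats in place of image values (the function name is illustrative):

#include <algorithm>

// Clamp-and-rescale an intensity into [0, 1] given display bounds,
// mirroring mrcolour's 'scale' lambda. Sketch only; assumes upper > lower.
inline float rescale_intensity (const float value, const float lower, const float upper)
{
  const float multiplier = 1.0f / (upper - lower);
  return std::max (0.0f, std::min (1.0f, multiplier * (value - lower)));
}

// Example: with lower = 100 and upper = 300, an intensity of 200 maps to 0.5;
// anything at or below 100 maps to 0.0, anything at or above 300 maps to 1.0.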
*/ - #include "axes.h" #include "command.h" #include "header.h" @@ -31,6 +32,10 @@ using namespace MR; using namespace App; + + +bool add_to_command_history = true; + void usage () { AUTHOR = "J-Donald Tournier (jdtournier@gmail.com) and Robert E. Smith (robert.smith@florey.edu.au)"; @@ -45,50 +50,20 @@ void usage () "specific region of interest, or flip the images. Some of the possible " "operations are described in more detail below." - + "The -coord option is used to select the coordinates within the input " - "image that are to be retained in the output image. This can therefore " - "be used to include or exclude subsets of slices along a particular " - "spatial axis, or volumes / series within higher dimensions. " - "For instance: " - "-coord 3 0 extracts the first volume from a 4D image; " - "-coord 1 24 extracts slice number 24 along the y-axis." + + "Note that for both the -coord and -axes options, indexing starts from 0 " + "rather than 1. E.g. " + "-coord 3 <#> selects volumes (the fourth dimension) from the series; " + "-axes 0,1,2 includes only the three spatial axes in the output image." - + "The colon operator can be particularly useful in conjunction with the " - "-coord option, in order to select multiple coordinates. " - "For instance: " - "-coord 3 1:59 " - "would select all but the first volume from an image containing 60 volumes." + + "Additionally, for the second input to the -coord option and the -axes " + "option, you can use any valid number sequence in the selection, as well " + "as the 'end' keyword (see the main documentation for details); this can be " + "particularly useful to select multiple coordinates." + "The -vox option is used to change the size of the voxels in the output " - "image. Note that this does not re-sample the image based on a new " - "voxel size (that is done using the mrresize command); this only changes " - "the voxel size as reported in the image header. Voxel sizes for " - "individual axes can be set independently, using a comma-separated list of " - "values; e.g. " - "-vox 1,,3.5 " - "will change the voxel size along the x & z axes to 1.0mm and 3.5mm " - "respectively, and leave the y-axis voxel size unchanged." - - + "The -axes option specifies which axes from the input image will be used " - "to form the output image. This allows the permutation, omission, or " - "addition of axes into the output image. The axes should be supplied as a " - "comma-separated list of axis indices, e.g. " - "-axes 0,1,2 " - "would select only the three spatial axes to form the output image. If an " - "axis from the input image is to be omitted from the output image, it must " - "have dimension 1; either in the input image itself, or a single coordinate " - "along that axis must be selected by the user by using the -coord option. " - "An axis of unity dimension can be inserted by supplying -1 at the " - "corresponding position in the list." - - + "The -scaling option specifies the data scaling parameters stored within " - "the image header that are used to rescale the image intensity values. " - "Where the raw data stored in a particular voxel is I, the value within " - "that voxel is interpreted as: " - "value = offset + (scale x I). " - "To adjust this scaling, the relevant parameters must be provided as a " - "comma-separated 2-vector of floating-point values, in the format " - "\"offset,scale\" (no quotation marks)." 
+ "image as reported in the image header; note however that this does not " + "re-sample the image based on a new voxel size (that is done using the " + "mrresize command)." + "By default, the intensity scaling parameters in the input image header " "are passed through to the output image header when writing to an integer " @@ -96,10 +71,90 @@ void usage () "images. Note that the -scaling option will therefore have no effect for " "floating-point or binary output images." - + "Note that for both the -coord and -axes options, indexing starts from 0 " - "rather than 1. E.g. " - "-coord 3 <#> selects volumes (the fourth dimension) from the series; " - "-axes 0,1,2 includes only the three spatial axes in the output image."; + + "The -axes option specifies which axes from the input image will be used " + "to form the output image. This allows the permutation, omission, or " + "addition of axes into the output image. The axes should be supplied as a " + "comma-separated list of axis indices. If an axis from the input image is " + "to be omitted from the output image, it must either already have a size of " + "1, or a single coordinate along that axis must be selected by the user by " + "using the -coord option. Examples are provided further below." + + + DWI::bvalue_scaling_description; + + + EXAMPLES + + Example ("Extract the first volume from a 4D image, and make the output a 3D image", + "mrconvert in.mif -coord 3 0 -axes 0,1,2 out.mif", + "The -coord 3 0 option extracts, from axis number 3 (which is the " + "fourth axis since counting begins from 0; this is the axis that " + "steps across image volumes), only coordinate number 0 (i.e. the " + "first volume). The -axes 0,1,2 ensures that only the first three " + "axes (i.e. the spatial axes) are retained; if this option were not " + "used in this example, then image out.mif would be a 4D image, " + "but it would only consist of a single volume, and mrinfo would " + "report its size along the fourth axis as 1.") + + + Example ("Extract slice number 24 along the AP direction", + "mrconvert volume.mif slice.mif -coord 1 24", + "MRtrix3 uses a RAS (Right-Anterior-Superior) axis " + "convention, and internally reorients images upon loading " + "in order to conform to this as far as possible. So for " + "non-exotic data, axis 1 should correspond (approximately) to the " + "anterior-posterior direction.") + + + Example ("Extract only every other volume from a 4D image", + "mrconvert all.mif every_other.mif -coord 3 1:2:end", + "This example demonstrates two features: Use of the " + "colon syntax to conveniently specify a number sequence " + "(in the format \'start:step:stop\'); and use of the \'end\' " + "keyword to generate this sequence up to the size of the " + "input image along that axis (i.e. 
the number of volumes).") + + + Example ("Alter the image header to report a new isotropic voxel size", + "mrconvert in.mif isotropic.mif -vox 1.25", + "By providing a single value to the -vox option only, the " + "specified value is used to set the voxel size in mm for all " + "three spatial axes in the output image.") + + + Example ("Alter the image header to report a new anisotropic voxel size", + "mrconvert in.mif anisotropic.mif -vox 1,,3.5", + "This example will change the reported voxel size along the first " + "and third axes (ideally left-right and inferior-superior) to " + "1.0mm and 3.5mm respectively, and leave the voxel size along the " + "second axis (ideally anterior-posterior) unchanged.") + + + Example ("Turn a single-volume 4D image into a 3D image", + "mrconvert 4D.mif 3D.mif -axes 0,1,2", + "Sometimes in the process of extracting or calculating a single " + "3D volume from a 4D image series, the size of the image reported " + "by mrinfo will be \"X x Y x Z x 1\", indicating that the resulting " + "image is in fact also 4D, it just happens to contain only one " + "volume. This example demonstrates how to convert this into a " + "genuine 3D image (i.e. mrinfo will report the size as \"X x Y x Z\".") + + + Example ("Insert an axis of size 1 into the image", + "mrconvert XYZD.mif XYZ1D.mif -axes 0,1,2,-1,3", + "This example uses the value -1 as a flag to indicate to mrconvert " + "where a new axis of unity size is to be inserted. In this particular " + "example, the input image has four axes: the spatial axes X, Y and Z, " + "and some form of data D is stored across the fourth axis (i.e. " + "volumes). Due to insertion of a new axis, the output image is 5D: " + "the three spatial axes (XYZ), a single volume (the size of the " + "output image along the fourth axis will be 1), and data D " + "will be stored as volume groups along the fifth axis of the image.") + + + Example ("Manually reset the data scaling parameters stored within the image header to defaults", + "mrconvert with_scaling.mif without_scaling.mif -scaling 0.0,1.0", + "This command-line option alters the parameters stored within the image " + "header that provide a linear mapping from raw intensity values stored " + "in the image data to some other scale. Where the raw data stored in a " + "particular voxel is I, the value within that voxel is interpreted as: " + "value = offset + (scale x I). To adjust this scaling, the relevant " + "parameters must be provided as a comma-separated 2-vector of " + "floating-point values, in the format \"offset,scale\" (no quotation " + "marks). This particular example sets the offset to zero and the scale " + "to one, which equates to no rescaling of the raw intensity data."); + ARGUMENTS + Argument ("input", "the input image.").type_image_in () @@ -110,13 +165,15 @@ void usage () + OptionGroup ("Options for manipulating fundamental image properties") + Option ("coord", - "retain data from the input image only at the coordinates specified") - .allow_multiple() + "retain data from the input image only at the coordinates " + "specified in the selection along the specified axis. 
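The examples above rely on mrconvert's number-sequence syntax, where a selection such as 1:2:end expands to every index from the start value, in the given step, up to the last valid index along that axis. As a rough illustration of what such an expansion produces (a hypothetical helper, not MRtrix3's actual sequence parser):

#include <cstddef>
#include <string>
#include <vector>

// Expand a 'start:step:stop' selection where 'stop' may be the keyword "end"
// (meaning the last valid index along an axis of size 'axis_size').
std::vector<std::size_t> expand_selection (std::size_t start, std::size_t step,
                                           const std::string& stop, std::size_t axis_size)
{
  if (step == 0)
    step = 1;                                   // guard against a zero step
  const std::size_t last = (stop == "end") ? axis_size - 1 : std::stoul (stop);
  std::vector<std::size_t> indices;
  for (std::size_t i = start; i <= last; i += step)
    indices.push_back (i);
  return indices;
}

// e.g. expand_selection (1, 2, "end", 60) yields 1, 3, 5, ..., 59: every other
// volume of a 60-volume series, matching the "-coord 3 1:2:end" example above.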
The selection " + "argument expects a number sequence, which can also include the " + "'end' keyword.").allow_multiple() + Argument ("axis").type_integer (0) - + Argument ("coord").type_sequence_int() + + Argument ("selection").type_sequence_int() + Option ("vox", - "change the voxel dimensions of the output image") + "change the voxel dimensions reported in the output image header") + Argument ("sizes").type_sequence_float() + Option ("axes", @@ -153,24 +210,33 @@ void usage () + Argument ("key").type_text() + Argument ("value").type_text() + + Option ("copy_properties", + "clear all generic properties and replace with the properties from the image / file specified.") + + Argument ("source").type_various() + + Stride::Options + DataType::options() - + DWI::GradImportOptions (false) + + DWI::GradImportOptions () + + DWI::bvalue_scaling_option + + DWI::GradExportOptions() - + PhaseEncoding::ImportOptions - + PhaseEncoding::ExportOptions; + + PhaseEncoding::ImportOptions + + PhaseEncoding::ExportOptions; } + + + void permute_DW_scheme (Header& H, const vector& axes) { - auto in = DWI::get_DW_scheme (H); + auto in = DWI::parse_DW_scheme (H); if (!in.rows()) return; @@ -259,9 +325,11 @@ inline vector set_header (Header& header, const ImageType& input) opt = get_options ("vox"); if (opt.size()) { - vector vox = opt[0][0]; + vector vox = parse_floats (opt[0][0]); if (vox.size() > header.ndim()) throw Exception ("too many axes supplied to -vox option"); + if (vox.size() == 1) + vox.resize (3, vox[0]); for (size_t n = 0; n < vox.size(); ++n) { if (std::isfinite (vox[n])) header.spacing(n) = vox[n]; @@ -277,11 +345,12 @@ inline vector set_header (Header& header, const ImageType& input) + template void copy_permute (const InputType& in, Header& header_out, const std::string& output_filename) { const auto axes = set_header (header_out, in); - auto out = Image::create (output_filename, header_out); + auto out = Image::create (output_filename, header_out, add_to_command_history); DWI::export_grad_commandline (out); PhaseEncoding::export_commandline (out); auto perm = Adapter::make (in, axes); @@ -317,6 +386,15 @@ void extract (Header& header_in, Header& header_out, const vector>& void run () { Header header_in = Header::open (argument[0]); + Eigen::MatrixXd dw_scheme; + try { + dw_scheme = DWI::get_DW_scheme (header_in, DWI::get_cmdline_bvalue_scaling_behaviour()); + } + catch (Exception& e) { + if (get_options ("grad").size() || get_options ("fslgrad").size() || get_options ("bvalue_scaling").size()) + throw; + e.display (2); + } Header header_out (header_in); header_out.datatype() = DataType::from_command_line (header_out.datatype()); @@ -324,9 +402,6 @@ void run () if (header_in.datatype().is_complex() && !header_out.datatype().is_complex()) WARN ("requested datatype is real but input datatype is complex - imaginary component will be ignored"); - if (get_options ("grad").size() || get_options ("fslgrad").size()) - DWI::set_DW_scheme (header_out, DWI::get_DW_scheme (header_in)); - if (get_options ("import_pe_table").size() || get_options ("import_pe_eddy").size()) PhaseEncoding::set_scheme (header_out, PhaseEncoding::get_scheme (header_in)); @@ -336,23 +411,50 @@ void run () + opt = get_options ("copy_properties"); + if (opt.size()) { + header_out.keyval().clear(); + if (str(opt[0][0]) != "NULL") { + try { + const Header source = Header::open (opt[0][0]); + header_out.keyval() = source.keyval(); + } catch (...) { + try { + File::JSON::load (header_out, opt[0][0]); + } catch (...) 
{ + throw Exception ("Unable to obtain header key-value entries from spec \"" + str(opt[0][0]) + "\""); + } + } + } + } + opt = get_options ("clear_property"); for (size_t n = 0; n < opt.size(); ++n) { + if (str(opt[n][0]) == "command_history") + add_to_command_history = false; auto entry = header_out.keyval().find (opt[n][0]); if (entry == header_out.keyval().end()) { - WARN ("No header key/value entry \"" + opt[n][0] + "\" found; ignored"); + if (std::string(opt[n][0]) != "command_history") { + WARN ("No header key/value entry \"" + opt[n][0] + "\" found; ignored"); + } } else { header_out.keyval().erase (entry); } } opt = get_options ("set_property"); - for (size_t n = 0; n < opt.size(); ++n) + for (size_t n = 0; n < opt.size(); ++n) { + if (str(opt[n][0]) == "command_history") + add_to_command_history = false; header_out.keyval()[opt[n][0].as_text()] = opt[n][1].as_text(); + } opt = get_options ("append_property"); - for (size_t n = 0; n < opt.size(); ++n) + for (size_t n = 0; n < opt.size(); ++n) { + if (str(opt[n][0]) == "command_history") + add_to_command_history = false; add_line (header_out.keyval()[opt[n][0].as_text()], opt[n][1].as_text()); + } @@ -378,11 +480,11 @@ void run () header_out.size (axis) = pos[axis].size(); if (axis == 3) { - const auto grad = DWI::get_DW_scheme (header_in); + const auto grad = DWI::parse_DW_scheme (header_out); if (grad.rows()) { if ((ssize_t)grad.rows() != header_in.size(3)) { WARN ("Diffusion encoding of input file does not match number of image volumes; omitting gradient information from output image"); - header_out.keyval().erase ("dw_scheme"); + DWI::clear_DW_scheme (header_out); } else { Eigen::MatrixXd extract_grad (pos[3].size(), grad.cols()); for (size_t dir = 0; dir != pos[3].size(); ++dir) @@ -461,6 +563,6 @@ void run () opt = get_options ("json_export"); if (opt.size()) - File::JSON::save (header_out, opt[0][0]); + File::JSON::save (header_out, opt[0][0], argument[1]); } diff --git a/cmd/mrcrop.cpp b/cmd/mrcrop.cpp deleted file mode 100644 index 483fc23496..0000000000 --- a/cmd/mrcrop.cpp +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ - * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * For more details, see http://www.mrtrix.org/ - */ - - -#include "command.h" -#include "image.h" -#include "algo/copy.h" -#include "adapter/subset.h" - - - -using namespace MR; -using namespace App; - - - -void usage () -{ - - AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)"; - - SYNOPSIS = "Crop an image to a reduced field of view"; - - DESCRIPTION - + "Extent of cropping can be determined using either manual setting of axis dimensions, or a computed mask image corresponding to the brain." - + "If using a mask, a gap of 1 voxel will be left at all 6 edges of the image such that trilinear interpolation upon the resulting images is still valid." 
- + "This is useful for axially-acquired brain images, where the image size can be reduced by a factor of 2 by removing the empty space on either side of the brain."; - - ARGUMENTS - + Argument ("image_in", "the image to be cropped").type_image_in() - + Argument ("image_out", "the output path for the resulting cropped image").type_image_out(); - - - OPTIONS - - + Option ("mask", "crop the input image according to the spatial extent of a mask image") - + Argument ("image", "the mask image").type_image_in() - - + Option ("axis", "crop the input image in the provided axis").allow_multiple() - + Argument ("index", "the index of the image axis to be cropped").type_integer (0, 2) - + Argument ("start", "the first voxel along this axis to be included in the output image").type_integer (0) - + Argument ("end", "the last voxel along this axis to be included in the output image").type_integer (0); - -} - - - - -void run () -{ - - auto in = Image::open (argument[0]); - - vector> bounds (in.ndim(), vector (2)); - for (size_t axis = 0; axis < in.ndim(); axis++) { - bounds[axis][0] = 0; - bounds[axis][1] = in.size (axis) - 1; - } - - auto opt = get_options ("mask"); - if (opt.size()) { - - auto mask = Image::open (opt[0][0]); - check_dimensions (in, mask, 0, 3); - - for (size_t axis = 0; axis != 3; ++axis) { - bounds[axis][0] = in.size (axis); - bounds[axis][1] = 0; - } - - struct BoundsCheck { NOMEMALIGN - vector>& overall_bounds; - vector> bounds; - BoundsCheck (vector>& overall_bounds) : overall_bounds (overall_bounds), bounds (overall_bounds) { } - ~BoundsCheck () { - for (size_t axis = 0; axis != 3; ++axis) { - overall_bounds[axis][0] = std::min (bounds[axis][0], overall_bounds[axis][0]); - overall_bounds[axis][1] = std::max (bounds[axis][1], overall_bounds[axis][1]); - } - } - void operator() (const decltype(mask)& m) { - if (m.value()) { - for (size_t axis = 0; axis != 3; ++axis) { - bounds[axis][0] = std::min (bounds[axis][0], m.index(axis)); - bounds[axis][1] = std::max (bounds[axis][1], m.index(axis)); - } - } - } - }; - - // Note that even though only 3 dimensions are cropped when using a mask, the bounds - // are computed by checking the extent for all dimensions (for example a 4D AFD mask) - ThreadedLoop (mask).run (BoundsCheck (bounds), mask); - - for (size_t axis = 0; axis != 3; ++axis) { - if (bounds[axis][0] > bounds[axis][1]) - throw Exception ("mask image is empty; can't use to crop image"); - if (bounds[axis][0]) - --bounds[axis][0]; - if (bounds[axis][1] < mask.size (axis) - 1) - ++bounds[axis][1]; - } - - } - - opt = get_options ("axis"); - for (size_t i = 0; i != opt.size(); ++i) { - // Manual cropping of axis overrides mask image bounds - const ssize_t axis = opt[i][0]; - const ssize_t start = opt[i][1]; - const ssize_t end = opt[i][2]; - if (start < 0 || end >= in.size(axis)) - throw Exception ("Index supplied for axis " + str(axis) + " is out of bounds"); - if (end < start) - throw Exception ("End index supplied for axis " + str(axis) + " is less than start index"); - bounds[axis][0] = start; - bounds[axis][1] = end; - } - - vector from (in.ndim()); - vector size (in.ndim()); - for (size_t axis = 0; axis < in.ndim(); axis++) { - from[axis] = bounds[axis][0]; - size[axis] = bounds[axis][1] - from[axis] + 1; - } - - auto cropped = Adapter::make (in, from, size); - auto out = Image::create (argument[1], cropped); - threaded_copy_with_progress_message ("cropping image", cropped, out); -} - diff --git a/cmd/mrdegibbs.cpp b/cmd/mrdegibbs.cpp index c6af0a83ff..e8b4cd47d0 100644 --- 
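The deleted mrcrop code above (and its replacement inside mrgrid further below) derives the cropping extent from a mask by tracking, per spatial axis, the lowest and highest voxel index at which the mask is non-zero, then widening each bound by one voxel where possible so that trilinear interpolation of the cropped image remains valid. A single-threaded sketch over a dense boolean volume, standing in for the multi-threaded BoundsCheck functor that operates on MRtrix3 Image objects:

#include <algorithm>
#include <array>
#include <stdexcept>
#include <vector>

// Per-axis [min, max] bounds of non-zero voxels in mask[x][y][z],
// plus a 1-voxel margin clamped to the image extent. Sketch only;
// assumes a non-empty, rectangular mask volume.
std::array<std::array<long,2>,3> mask_bounds (const std::vector<std::vector<std::vector<bool>>>& mask)
{
  const long sizes[3] = { long(mask.size()), long(mask[0].size()), long(mask[0][0].size()) };
  std::array<std::array<long,2>,3> bounds;
  for (int axis = 0; axis != 3; ++axis) {
    bounds[axis][0] = sizes[axis];   // shrinks to the first voxel inside the mask
    bounds[axis][1] = -1;            // grows to the last voxel inside the mask
  }
  for (long x = 0; x != sizes[0]; ++x)
    for (long y = 0; y != sizes[1]; ++y)
      for (long z = 0; z != sizes[2]; ++z)
        if (mask[x][y][z]) {
          const long idx[3] = { x, y, z };
          for (int axis = 0; axis != 3; ++axis) {
            bounds[axis][0] = std::min (bounds[axis][0], idx[axis]);
            bounds[axis][1] = std::max (bounds[axis][1], idx[axis]);
          }
        }
  if (bounds[0][1] < bounds[0][0])
    throw std::runtime_error ("mask image is empty; can't use to crop image");
  for (int axis = 0; axis != 3; ++axis) {   // leave a 1-voxel gap at each edge
    if (bounds[axis][0] > 0) --bounds[axis][0];
    if (bounds[axis][1] < sizes[axis] - 1) ++bounds[axis][1];
  }
  return bounds;
}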
a/cmd/mrdegibbs.cpp +++ b/cmd/mrdegibbs.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "axes.h" @@ -37,8 +38,8 @@ void usage () + "This command is designed to run on data directly after it has been reconstructed by the scanner, " "before any interpolation of any kind has taken place. You should not run this command after any " - "form of motion correction (e.g. not after dwipreproc). Similarly, if you intend running dwidenoise, " - "you should run this command afterwards, since it has the potential to alter the noise structure, " + "form of motion correction (e.g. not after dwifslpreproc). Similarly, if you intend running dwidenoise, " + "you should run denoising before this command to not alter the noise structure, " "which would impact on dwidenoise's performance." + "Note that this method is designed to work on images acquired with full k-space coverage. 
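The mrdegibbs hunk that follows infers the two within-slice axes from the "SliceEncodingDirection" header field: the one-hot slice-encoding axis is excluded, and the remaining pair of axes is used for the unringing. The selection logic reduces to the stand-alone sketch below (illustrative only; the real code obtains the one-hot vector via Axes::id2dir()):

#include <array>
#include <cstddef>
#include <stdexcept>

// Return the two in-plane (within-slice) axes given a one-hot
// slice-encoding direction, e.g. {0,0,1} for axial slices -> axes {0,1}.
std::array<std::size_t,2> in_plane_axes (const std::array<int,3>& slice_axis_onehot)
{
  if (slice_axis_onehot[0]) return { 1, 2 };
  if (slice_axis_onehot[1]) return { 0, 2 };
  if (slice_axis_onehot[2]) return { 0, 1 };
  throw std::runtime_error ("invalid slice axis one-hot encoding");
}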
" @@ -318,24 +319,28 @@ void run () const bool axes_set_manually = opt.size(); if (opt.size()) { vector axes = opt[0][0]; - if (slice_axes.size() != 2) + if (axes.size() != 2) throw Exception ("slice axes must be specified as a comma-separated 2-vector"); + if (size_t(std::max (axes[0], axes[1])) >= header.ndim()) + throw Exception ("slice axes must be within the dimensionality of the image"); + if (axes[0] == axes[1]) + throw Exception ("two independent slice axes must be specified"); slice_axes = { size_t(axes[0]), size_t(axes[1]) }; } auto slice_encoding_it = header.keyval().find ("SliceEncodingDirection"); if (slice_encoding_it != header.keyval().end()) { try { - const Eigen::Vector3 slice_endocing_axis_onehot = Axes::id2dir (slice_encoding_it->second); + const Eigen::Vector3 slice_encoding_axis_onehot = Axes::id2dir (slice_encoding_it->second); vector auto_slice_axes = { 0, 0 }; - if (slice_endocing_axis_onehot[0]) + if (slice_encoding_axis_onehot[0]) auto_slice_axes = { 1, 2 }; - else if (slice_endocing_axis_onehot[1]) + else if (slice_encoding_axis_onehot[1]) auto_slice_axes = { 0, 2 }; - else if (slice_endocing_axis_onehot[2]) + else if (slice_encoding_axis_onehot[2]) auto_slice_axes = { 0, 1 }; else - throw Exception ("Fatal error: Invalid slice axis one-hot encoding [ " + str(slice_endocing_axis_onehot.transpose()) + " ]"); + throw Exception ("Fatal error: Invalid slice axis one-hot encoding [ " + str(slice_encoding_axis_onehot.transpose()) + " ]"); if (axes_set_manually) { if (slice_axes == auto_slice_axes) { INFO ("User's manual selection of within-slice axes consistent with \"SliceEncodingDirection\" field in image header"); diff --git a/cmd/mrdump.cpp b/cmd/mrdump.cpp index add0700bbb..acae44f383 100644 --- a/cmd/mrdump.cpp +++ b/cmd/mrdump.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "datatype.h" #include "image.h" diff --git a/cmd/mredit.cpp b/cmd/mredit.cpp index 0b90b0d385..6f27f9c90c 100644 --- a/cmd/mredit.cpp +++ b/cmd/mredit.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
* - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "command.h" diff --git a/cmd/mrfilter.cpp b/cmd/mrfilter.cpp index af46e20458..45e379dd10 100644 --- a/cmd/mrfilter.cpp +++ b/cmd/mrfilter.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "command.h" @@ -23,13 +24,14 @@ #include "filter/normalise.h" #include "filter/median.h" #include "filter/smooth.h" +#include "filter/zclean.h" using namespace MR; using namespace App; -const char* filters[] = { "fft", "gradient", "median", "smooth", "normalise", nullptr }; +const char* filters[] = { "fft", "gradient", "median", "smooth", "normalise", "zclean", NULL }; const OptionGroup FFTOption = OptionGroup ("Options for FFT filter") @@ -100,6 +102,18 @@ const OptionGroup SmoothOption = OptionGroup ("Options for smooth filter") "The default extent is 2 * ceil(2.5 * stdev / voxel_size) - 1.") + Argument ("voxels").type_sequence_int(); +const OptionGroup ZcleanOption = OptionGroup ("Options for zclean filter") ++ Option ("zupper", "define high intensity outliers: default: 2.5") + + Argument ("num").type_float(0.1, std::numeric_limits::infinity()) ++ Option ("zlower", "define low intensity outliers: default: 2.5") + + Argument ("num").type_float(0.1, std::numeric_limits::infinity()) ++ Option ("bridge", "number of voxels to gap to fill holes in mask: default: 4") + + Argument ("num").type_integer(0) ++ Option ("maskin", "initial mask that defines the maximum spatial extent and the region from " + "which to smaple the intensity range.") + + Argument ("image").type_image_in() ++ Option ("maskout", "Output a refined mask based on a spatially coherent region with normal intensity range.") + + Argument ("image").type_image_out(); void usage () @@ -109,7 +123,7 @@ void usage () SYNOPSIS = "Perform filtering operations on 3D / 4D MR images"; DESCRIPTION - + "The available filters are: fft, gradient, median, smooth, normalise." 
+ + "The available filters are: fft, gradient, median, smooth, normalise, zclean." + "Each filter has its own unique set of optional parameters." + "For 4D images, each 3D volume is processed independently."; @@ -124,6 +138,7 @@ void usage () + MedianOption + NormaliseOption + SmoothOption + + ZcleanOption + Stride::Options; } @@ -257,6 +272,37 @@ void run () { break; } + // Zclean + case 5: + { + auto input = Image::open (argument[0]); + Filter::ZClean filter (input); + + auto opt = get_options ("maskin"); + if (!opt.size()) + throw Exception (std::string(argument[1]) + " filter requires initial mask"); + Image maskin = Image::open (opt[0][0]); + check_dimensions (maskin, input, 0, 3); + + filter.set_message (std::string("applying ") + std::string(argument[1]) + " filter to image " + std::string(argument[0]) + "..."); + Stride::set_from_command_line (filter); + + filter.set_voxels_to_bridge (get_option_value ("bridge", 4)); + float zlower = get_option_value ("zlower", 2.5); + float zupper = get_option_value ("zupper", 2.5); + filter.set_zlim (zlower, zupper); + + auto output = Image::create (argument[2], filter); + filter (input, maskin, output); + + opt = get_options ("maskout"); + if (opt.size()) { + auto maskout = Image::create (opt[0][0], filter.mask); + threaded_copy (filter.mask, maskout); + } + break; + } + default: assert (0); break; diff --git a/cmd/mrgrid.cpp b/cmd/mrgrid.cpp new file mode 100644 index 0000000000..b5c218ebae --- /dev/null +++ b/cmd/mrgrid.cpp @@ -0,0 +1,420 @@ +/* + * Copyright (c) 2008-2018 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * + * MRtrix3 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/ + */ + +#include +#include "command.h" +#include "image.h" +#include "filter/resize.h" +#include "filter/reslice.h" +#include "interp/nearest.h" +#include "interp/linear.h" +#include "interp/cubic.h" +#include "interp/sinc.h" +#include "progressbar.h" +#include "algo/copy.h" +#include "adapter/regrid.h" + + +using namespace MR; +using namespace App; + +const char* interp_choices[] = { "nearest", "linear", "cubic", "sinc", NULL }; +const char* operation_choices[] = { "regrid", "crop", "pad", NULL }; + + +void usage () +{ + AUTHOR = "Max Pietsch (maximilian.pietsch@kcl.ac.uk) & David Raffelt (david.raffelt@florey.edu.au) & Robert E. Smith (robert.smith@florey.edu.au)"; + + SYNOPSIS = "Modify the grid of an image without interpolation (cropping or padding) or by regridding to an image grid with modified orientation, location and or resolution. The image content remains in place in real world coordinates."; + + DESCRIPTION + + "- regrid: This operation performs changes of the voxel grid that require interpolation of the image such as changing the resolution or location and orientation of the voxel grid. " + "If the image is down-sampled, the appropriate smoothing is automatically applied using Gaussian smoothing unless nearest neighbour interpolation is selected or oversample is changed explicitly. The resolution can only be changed for spatial dimensions. " + + "- crop: The image extent after cropping, can be specified either manually for each axis dimensions, or via a mask or reference image. 
" + "The image can be cropped to the extent of a mask. " + "This is useful for axially-acquired brain images, where the image size can be reduced by a factor of 2 by removing the empty space on either side of the brain. Note that cropping does not extend the image beyond the original FOV unless explicitly specified (via -crop_unbound or negative -axis extent)." + + "- pad: Analogously to cropping, padding increases the FOV of an image without image interpolation. Pad and crop can be performed simultaneously by specifying signed specifier argument values to the -axis option." + + "This command encapsulates and extends the functionality of the superseded commands 'mrpad', 'mrcrop' and 'mrresize'. Note the difference in -axis convention used for 'mrcrop' and 'mrpad' (see -axis option description)."; + + EXAMPLES + + Example ("Crop and pad the first axis", + "mrgrid in.mif crop -axis 0 10,-5 out.mif", + "This removes 10 voxels on the lower and pads with 5 on the upper bound, which is equivalent to " + "padding with the negated specifier (mrgrid in.mif pad -axis 0 -10,5 out.mif).") + + + Example ("Right-pad the image to the number of voxels of a reference image", + "mrgrid in.mif pad -as ref.mif -all_axes -axis 3 0,0 out.mif -fill nan", + "This pads the image on the upper bound of all axes except for the volume dimension. " + "The headers of in.mif and ref.mif are ignored and the output image uses NAN values to fill in voxels outside the original range of in.mif.") + + + Example ("Regrid and interpolate to match the voxel grid of a reference image", + "mrgrid in.mif regrid -template ref.mif -scale 1,1,0.5 out.mif -fill nan", + "The -template instructs to regrid in.mif to match the voxel grid of ref.mif (voxel size, grid orientation and voxel centres). " + "The -scale option overwrites the voxel scaling factor yielding voxel sizes in the third dimension that are " + "twice as coarse as those of the template image."); + + + ARGUMENTS + + Argument ("input", "input image to be regridded.").type_image_in () + + Argument ("operation", "the operation to be performed, one of: " + join(operation_choices, ", ") + ".").type_choice (operation_choices) + + Argument ("output", "the output image.").type_image_out (); + + OPTIONS + + OptionGroup ("Regridding options (involves image interpolation, applied to spatial axes only)") + + Option ("template", "match the input image grid (voxel spacing, image size, header transformation) to that of a reference image. " + "The image resolution relative to the template image can be changed with one of -size, -voxel, -scale." ) + + Argument ("image").type_image_in () + + + Option ("size", "define the size (number of voxels) in each spatial dimension for the output image. " + "This should be specified as a comma-separated list.") + + Argument ("dims").type_sequence_int() + + + Option ("voxel", "define the new voxel size for the output image. " + "This can be specified either as a single value to be used for all spatial dimensions, " + "or as a comma-separated list of the size for each voxel dimension.") + + Argument ("size").type_sequence_float() + + + Option ("scale", "scale the image resolution by the supplied factor. " + "This can be specified either as a single value to be used for all dimensions, " + "or as a comma-separated list of scale factors for each dimension.") + + Argument ("factor").type_sequence_float() + + + Option ("interp", "set the interpolation method to use when reslicing (choices: nearest, linear, cubic, sinc. 
Default: cubic).") + + Argument ("method").type_choice (interp_choices) + + + Option ("oversample", + "set the amount of over-sampling (in the target space) to perform when regridding. This is particularly " + "relevant when downsamping a high-resolution image to a low-resolution image, to avoid aliasing artefacts. " + "This can consist of a single integer, or a comma-separated list of 3 integers if different oversampling " + "factors are desired along the different axes. Default is determined from ratio of voxel dimensions (disabled " + "for nearest-neighbour interpolation).") + + Argument ("factor").type_sequence_int() + + + OptionGroup ("Pad and crop options (no image interpolation is performed, header transformation is adjusted)") + + Option ("as", "pad or crop the input image on the upper bound to match the specified reference image grid. " + "This operation ignores differences in image transformation between input and reference image.") + + Argument ("reference image").type_image_in () + + + Option ("uniform", "pad or crop the input image by a uniform number of voxels on all sides") + + Argument ("number").type_integer () + + + Option ("mask", "crop the input image according to the spatial extent of a mask image. " + "The mask must share a common voxel grid with the input image but differences in image transformations are " + "ignored. Note that even though only 3 dimensions are cropped when using a mask, the bounds are computed by " + "checking the extent for all dimensions. " + "Note that by default a gap of 1 voxel is left at all edges of the image to allow valid trilinear interpolation. " + "This gap can be modified with the -uniform option but by default it does not extend beyond the FOV unless -crop_unbound is used.") + + Argument ("image", "the mask image. ").type_image_in() + + + Option ("crop_unbound", "Allow padding beyond the original FOV when cropping.") + + + Option ("axis", "pad or crop the input image along the provided axis (defined by index). The specifier argument " + "defines the number of voxels added or removed on the lower or upper end of the axis (-axis index delta_lower,delta_upper) " + "or acts as a voxel selection range (-axis index start:stop). In both modes, values are relative to the input image " + "(overriding all other extent-specifying options). Negative delta specifier values trigger the inverse operation " + "(pad instead of crop and vice versa) and negative range specifier trigger padding. " + "Note that the deprecated commands 'mrcrop' and 'mrpad' used range-based and delta-based -axis indices, respectively." + ).allow_multiple() + + Argument ("index").type_integer (0) + + Argument ("spec").type_text() + + + Option ("all_axes", "Crop or pad all, not just spatial axes.") + + + OptionGroup ("General options") + + Option ("fill", "Use number as the out of bounds value. nan, inf and -inf are valid arguments. 
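For the regridding options listed above, changing the resolution by a scale factor conceptually multiplies the number of voxels along each spatial axis while shrinking the voxel spacing, so that the field of view stays approximately fixed. The arithmetic below is a hedged sketch of that relationship only; the actual behaviour is implemented by Filter::Resize and may differ in its rounding details:

#include <algorithm>
#include <cmath>

// Derive output grid size and voxel spacing along one axis for a given
// scale factor, preserving the field of view as closely as possible.
void scaled_grid (const int size_in, const double spacing_in, const double scale,
                  int& size_out, double& spacing_out)
{
  size_out = std::max (1, int(std::round (size_in * scale)));
  spacing_out = spacing_in * double(size_in) / double(size_out);
}

// e.g. size_in = 128 voxels at spacing_in = 2.0 mm with scale = 0.5
// gives size_out = 64 voxels at spacing_out = 4.0 mm (same 256 mm FOV).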
(Default: 0.0)") + + Argument ("number").type_float () + + + Stride::Options + + DataType::options(); +} + + +void run () { + auto input_header = Header::open (argument[0]); + + const int op = argument[1]; + + // Out of bounds value + default_type out_of_bounds_value = 0.0; + auto opt = get_options ("fill"); + if (opt.size()) + out_of_bounds_value = opt[0][0]; + + if (op == 0) { // regrid + INFO("operation: " + str(operation_choices[op])); + Filter::Resize regrid_filter (input_header); + regrid_filter.set_out_of_bounds_value(out_of_bounds_value); + size_t resize_option_count = 0; + size_t template_option_count = 0; + + int interp = 2; // cubic + opt = get_options ("interp"); + if (opt.size()) { + interp = opt[0][0]; + } + + // over-sampling + vector oversample = Adapter::AutoOverSample; + opt = get_options ("oversample"); + if (opt.size()) { + oversample = opt[0][0]; + } + + Header template_header; + opt = get_options ("template"); + if (opt.size()) { + template_header = Header::open(opt[0][0]); + if (template_header.ndim() < 3) + throw Exception ("the template image requires at least 3 spatial dimensions"); + add_line (regrid_filter.keyval()["comments"], std::string ("regridded to template image \"" + template_header.name() + "\"")); + for (auto i=0; i<3; ++i) { + regrid_filter.spacing(i) = template_header.spacing(i); + regrid_filter.size(i) = template_header.size(i); + } + regrid_filter.set_transform(template_header.transform()); + ++template_option_count; + } + + regrid_filter.set_interp_type (interp); + regrid_filter.set_oversample (oversample); + + vector scale; + opt = get_options ("scale"); + if (opt.size()) { + scale = parse_floats (opt[0][0]); + if (scale.size() == 1) + scale.resize (3, scale[0]); + regrid_filter.set_scale_factor (scale); + ++resize_option_count; + } + + vector image_size; + opt = get_options ("size"); + if (opt.size()) { + image_size = parse_ints (opt[0][0]); + regrid_filter.set_size (image_size); + ++resize_option_count; + } + + vector voxel_size; + opt = get_options ("voxel"); + if (opt.size()) { + voxel_size = parse_floats (opt[0][0]); + if (voxel_size.size() == 1) + voxel_size.resize (3, voxel_size[0]); + regrid_filter.set_voxel_size (voxel_size); + ++resize_option_count; + } + + if (!resize_option_count and !template_option_count) + throw Exception ("please use either the -scale, -voxel, -resolution or -template option to regrid the image"); + if (resize_option_count > 1) + throw Exception ("only a single method can be used to resize the image (image resolution, voxel size or scale factor)"); + + Header output_header (regrid_filter); + Stride::set_from_command_line (output_header); + if (interp == 0) + output_header.datatype() = DataType::from_command_line (input_header.datatype()); + else + output_header.datatype() = DataType::from_command_line (DataType::from ()); + auto output = Image::create (argument[2], output_header); + + auto input = input_header.get_image(); + regrid_filter (input, output); + + } else { // crop or pad + const bool do_crop = op == 1; + std::string message = do_crop ? "cropping image" : "padding image"; + INFO("operation: " + str(operation_choices[op])); + + if (get_options ("crop_unbound").size() && !do_crop) + throw Exception("-crop_unbound only applies only to the crop operation"); + + const size_t nd = get_options ("nd").size() ? 
input_header.ndim() : 3; + + vector> bounds (input_header.ndim(), vector (2)); + for (size_t axis = 0; axis < input_header.ndim(); axis++) { + bounds[axis][0] = 0; + bounds[axis][1] = input_header.size (axis) - 1; + } + + size_t crop_pad_option_count = 0; + + opt = get_options ("mask"); + if (opt.size()) { + if (!do_crop) throw Exception("padding with -mask option is not supported"); + INFO("cropping to mask"); + ++crop_pad_option_count; + auto mask = Image::open (opt[0][0]); + check_dimensions (input_header, mask, 0, 3); + + for (size_t axis = 0; axis != 3; ++axis) { + bounds[axis][0] = input_header.size (axis); + bounds[axis][1] = 0; + } + + struct BoundsCheck { NOMEMALIGN + vector>& overall_bounds; + vector> bounds; + BoundsCheck (vector>& overall_bounds) : overall_bounds (overall_bounds), bounds (overall_bounds) { } + ~BoundsCheck () { + for (size_t axis = 0; axis != 3; ++axis) { + overall_bounds[axis][0] = std::min (bounds[axis][0], overall_bounds[axis][0]); + overall_bounds[axis][1] = std::max (bounds[axis][1], overall_bounds[axis][1]); + } + } + void operator() (const decltype(mask)& m) { + if (m.value()) { + for (size_t axis = 0; axis != 3; ++axis) { + bounds[axis][0] = std::min (bounds[axis][0], m.index(axis)); + bounds[axis][1] = std::max (bounds[axis][1], m.index(axis)); + } + } + } + }; + + ThreadedLoop (mask).run (BoundsCheck (bounds), mask); + for (size_t axis = 0; axis != 3; ++axis) { + if ((input_header.size (axis) - 1) != bounds[axis][1] or bounds[axis][0] != 0) + INFO ("cropping to mask changes axis " + str(axis) + " extent from 0:" + str(input_header.size (axis) - 1) + + " to " + str(bounds[axis][0]) + ":" + str(bounds[axis][1])); + } + if (!get_options ("uniform").size()) { + INFO ("uniformly padding around mask by 1 voxel"); + // margin of 1 voxel around mask + for (size_t axis = 0; axis != 3; ++axis) { + bounds[axis][0] -= 1; + bounds[axis][1] += 1; + } + } + } + + opt = get_options ("as"); + if (opt.size()) { + if (crop_pad_option_count) + throw Exception (str(operation_choices[op]) + " can be performed using either a mask or a template image"); + ++crop_pad_option_count; + + Header template_header = Header::open(opt[0][0]); + + for (size_t axis = 0; axis != nd; ++axis) { + if (axis >= template_header.ndim()) { + if (do_crop) + bounds[axis][1] = 0; + } else { + if (do_crop) + bounds[axis][1] = std::min (bounds[axis][1], template_header.size(axis) - 1); + else + bounds[axis][1] = std::max (bounds[axis][1], template_header.size(axis) - 1); + } + } + } + + opt = get_options ("uniform"); + if (opt.size()) { + ++crop_pad_option_count; + ssize_t val = opt[0][0]; + INFO ("uniformly " + str(do_crop ? "cropping" : "padding") + " by " + str(val) + " voxels"); + for (size_t axis = 0; axis < nd; axis++) { + bounds[axis][0] += do_crop ? val : -val; + bounds[axis][1] += do_crop ? 
-val : val; + } + } + + if (do_crop && !get_options ("crop_unbound").size()) { + opt = get_options ("axis"); + std::set ignore; + for (size_t i = 0; i != opt.size(); ++i) + ignore.insert(opt[i][0]); + for (size_t axis = 0; axis != 3; ++axis) { + if (bounds[axis][0] < 0 || bounds[axis][1] > input_header.size(axis) - 1) { + if (ignore.find(axis) == ignore.end()) + INFO ("operation: crop without -crop_unbound: restricting padding on axis " + str(axis) + " to valid FOV " + + str(std::max (0, bounds[axis][0])) + ":" + str(std::min (bounds[axis][1], input_header.size(axis) - 1))); + bounds[axis][0] = std::max (0, bounds[axis][0]); + bounds[axis][1] = std::min (bounds[axis][1], input_header.size(axis) - 1); + } + } + } + + opt = get_options ("axis"); // overrides image bounds set by other options + for (size_t i = 0; i != opt.size(); ++i) { + ++crop_pad_option_count; + const size_t axis = opt[i][0]; + if (axis >= input_header.ndim()) + throw Exception ("-axis " + str(axis) + " larger than image dimensions (" + str(input_header.ndim()) + ")"); + std::string spec = str(opt[i][1]); + std::string::size_type start = 0, end; + end = spec.find_first_of(":", start); + if (end == std::string::npos) { // spec = delta_lower,delta_upper + vector delta; // 0: not changed, > 0: pad, < 0: crop + try { delta = parse_ints (opt[i][1]); } + catch (Exception& E) { Exception (E, "-axis " + str(axis) + ": can't parse delta specifier \"" + spec + "\""); } + if (delta.size() != 2) + throw Exception ("-axis " + str(axis) + ": can't parse delta specifier \"" + spec + "\""); + bounds[axis][0] = do_crop ? delta[0] : -delta[0]; + bounds[axis][1] = input_header.size (axis) - 1 + (do_crop ? -delta[1] : delta[1]); + } else { // spec = delta_lower:delta_upper + std::string token (strip (spec.substr (start, end-start))); + try { bounds[axis][0] = std::stoi(token); } + catch (Exception& E) { throw Exception (E, "-axis " + str(axis) + ": can't parse integer sequence specifier \"" + spec + "\""); } + token = strip (spec.substr (end+1)); + if (lowercase (token) == "end" || token.size() == 0) + bounds[axis][1] = input_header.size(axis) - 1; + else { + try { bounds[axis][1] = std::stoi(token); } + catch (Exception& E) { throw Exception (E, "-axis " + str(axis) + ": can't parse integer sequence specifier \"" + spec + "\""); } + } + } + } + + for (size_t axis = 0; axis != 3; ++axis) { + if (bounds[axis][1] < bounds[axis][0]) + throw Exception ("axis " + str(axis) + " is empty: (" + str(bounds[axis][0]) + ":" + str(bounds[axis][1]) + ")"); + } + + if (crop_pad_option_count == 0) + throw Exception ("no crop or pad option supplied"); + + vector from (input_header.ndim()); + vector size (input_header.ndim()); + for (size_t axis = 0; axis < input_header.ndim(); axis++) { + from[axis] = bounds[axis][0]; + size[axis] = bounds[axis][1] - from[axis] + 1; + } + + size_t changed_axes = 0; + for (size_t axis = 0; axis < nd; axis++) { + if (bounds[axis][0] != 0 || input_header.size (axis) != size[axis]) { + changed_axes++; + CONSOLE("changing axis " + str(axis) + " extent from 0:" + str(input_header.size (axis) - 1) + + " (n="+str(input_header.size (axis))+") to " + str(bounds[axis][0]) + ":" + str(bounds[axis][1]) + + " (n=" + str(size[axis]) + ")"); + } + } + if (!changed_axes) + WARN ("no axes were changed"); + + auto input = input_header.get_image(); + + auto regridded = Adapter::make (input, from, size, out_of_bounds_value); + Header output_header (regridded); + output_header.datatype() = DataType::from_command_line (DataType::from ()); + 
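The -axis handling above accepts two kinds of specifier: a comma-separated delta pair ("delta_lower,delta_upper") interpreted relative to the operation being performed, or a colon-separated index range ("start:stop", where "stop" may be "end"). The following sketch shows the crop-mode interpretation only, with simplified error handling and a hypothetical function name:

#include <stdexcept>
#include <string>
#include <utility>

// Resolve an mrgrid-style '-axis' specifier into [first, last] voxel indices
// for cropping an axis of 'axis_size' voxels. Negative deltas fall outside
// the original extent and hence imply padding, as described above.
std::pair<long,long> parse_axis_spec (const std::string& spec, const long axis_size)
{
  const auto colon = spec.find (':');
  if (colon == std::string::npos) {                        // "delta_lower,delta_upper"
    const auto comma = spec.find (',');
    if (comma == std::string::npos)
      throw std::runtime_error ("can't parse axis specifier \"" + spec + "\"");
    const long lower = std::stol (spec.substr (0, comma));
    const long upper = std::stol (spec.substr (comma + 1));
    return { lower, axis_size - 1 - upper };
  }
  const long first = std::stol (spec.substr (0, colon));   // "start:stop"
  const std::string stop = spec.substr (colon + 1);
  const long last = (stop.empty() || stop == "end") ? axis_size - 1 : std::stol (stop);
  return { first, last };
}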
Stride::set_from_command_line (output_header); + + auto output = Image::create (argument[2], output_header); + threaded_copy_with_progress_message (message.c_str(), regridded, output); + } +} diff --git a/cmd/mrhistmatch.cpp b/cmd/mrhistmatch.cpp index 4cb65348eb..c0f0227878 100644 --- a/cmd/mrhistmatch.cpp +++ b/cmd/mrhistmatch.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include @@ -21,6 +22,7 @@ #include "header.h" #include "image.h" +#include "adapter/replicate.h" #include "algo/histogram.h" #include "algo/loop.h" @@ -74,29 +76,27 @@ void match_linear (Image& input, vector input_data, target_data; { ProgressBar progress ("Loading & sorting image data", 4); - if (mask_input.valid()) { - for (auto l = Loop(input) (input, mask_input); l; ++l) { - if (mask_input.value() && std::isfinite (static_cast(input.value()))) - input_data.push_back (input.value()); - } - } else { - for (auto l = Loop(input) (input); l; ++l) { - if (std::isfinite (static_cast(input.value()))) - input_data.push_back (input.value()); + + auto fill = [] (Image& image, Image& mask) { + vector data; + if (mask.valid()) { + Adapter::Replicate> mask_replicate (mask, image); + for (auto l = Loop(image) (image, mask_replicate); l; ++l) { + if (mask_replicate.value() && std::isfinite (static_cast(image.value()))) + data.push_back (image.value()); + } + } else { + for (auto l = Loop(image) (image); l; ++l) { + if (std::isfinite (static_cast(image.value()))) + data.push_back (image.value()); + } } - } + return data; + }; + + input_data = fill (input, mask_input); ++progress; - if (mask_target.valid()) { - for (auto l = Loop(target) (target, mask_target); l; ++l) { - if (mask_target.value() && std::isfinite (static_cast(target.value()))) - target_data.push_back (target.value()); - } - } else { - for (auto l = Loop(target) (target); l; ++l) { - if (std::isfinite (static_cast(target.value()))) - target_data.push_back (target.value()); - } - } + target_data = fill (target, mask_target); ++progress; std::sort (input_data.begin(), input_data.end()); ++progress; @@ -194,19 +194,17 @@ void run () { auto input = Image::open (argument[1]); auto target = Image::open (argument[2]); - if (input.ndim() > 3 || target.ndim() > 3) - throw Exception ("mrhistmatch currently only works on 3D images"); Image mask_input, mask_target; auto opt = get_options ("mask_input"); if (opt.size()) { mask_input = Image::open (opt[0][0]); - check_dimensions (input, mask_input); + check_dimensions (input, mask_input, 0, 3); } opt = get_options 
("mask_target"); if (opt.size()) { mask_target = Image::open (opt[0][0]); - check_dimensions (target, mask_target); + check_dimensions (target, mask_target, 0, 3); } switch (int(argument[0])) { diff --git a/cmd/mrhistogram.cpp b/cmd/mrhistogram.cpp index 18f9da4a44..c3716781f1 100644 --- a/cmd/mrhistogram.cpp +++ b/cmd/mrhistogram.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "header.h" #include "image.h" @@ -115,6 +116,7 @@ void run () } File::OFStream output (argument[1]); + output << "# " << App::command_history_string << "\n"; Algo::Histogram::Calibrator calibrator (nbins, get_options ("ignorezero").size()); opt = get_options ("template"); @@ -130,12 +132,11 @@ void run () header.datatype().is_integer() && header.intensity_offset() == 0.0 && header.intensity_scale() == 1.0); } nbins = calibrator.get_num_bins(); - if (nbins == 0) { - std::string message; - message.append(std::string("Zero bins selected") + (get_options ("ignorezero").size() or get_options ("bins").size()? - "." :", you might want to use the -ignorezero or -bins option.")); - WARN(message); - } + if (!nbins) + throw Exception (std::string("No histogram bins constructed") + + ((get_options ("ignorezero").size() || get_options ("bins").size()) ? + "." : + "; you might want to use the -ignorezero or -bins option.")); for (size_t i = 0; i != nbins; ++i) output << (calibrator.get_min() + ((i+0.5) * calibrator.get_bin_width())) << ","; diff --git a/cmd/mrinfo.cpp b/cmd/mrinfo.cpp index 32d1861016..cdfb1d2310 100644 --- a/cmd/mrinfo.cpp +++ b/cmd/mrinfo.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. 
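The mrhistmatch change above folds the masked, finite-value collection for both images into a single fill lambda (using Adapter::Replicate so a 3D mask can gate a 4D image) and then sorts both series. As a rough standalone illustration of the general sort-then-match idea, here is a plain C++ sketch of collecting finite values under an optional mask and looking up the target value at the same quantile rank; it is not a reproduction of mrhistmatch's linear matching mode, and collect_finite / match_quantile are hypothetical names.

// Standalone sketch (plain C++): collect finite values under an optional mask,
// sort them, then map an input intensity to the target intensity at the same
// quantile rank. Illustration of the general idea only.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

std::vector<float> collect_finite (const std::vector<float>& values,
                                   const std::vector<bool>& mask)   // empty mask == no mask
{
  std::vector<float> data;
  for (std::size_t i = 0; i < values.size(); ++i)
    if ((mask.empty() || mask[i]) && std::isfinite (values[i]))
      data.push_back (values[i]);
  std::sort (data.begin(), data.end());
  return data;
}

float match_quantile (float value, const std::vector<float>& in_sorted,
                      const std::vector<float>& tgt_sorted)
{
  const auto it = std::lower_bound (in_sorted.begin(), in_sorted.end(), value);
  const double rank = double (it - in_sorted.begin()) / in_sorted.size();
  const std::size_t idx = std::min (tgt_sorted.size() - 1,
                                    std::size_t (rank * tgt_sorted.size()));
  return tgt_sorted[idx];
}

int main ()
{
  const auto in  = collect_finite ({ 1, 2, 3, 4, NAN }, {});   // NaN is dropped
  const auto tgt = collect_finite ({ 10, 20, 30, 40 }, {});
  std::cout << match_quantile (3.0f, in, tgt) << "\n";         // prints 30
}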
* - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include @@ -21,6 +22,7 @@ #include "phase_encoding.h" #include "types.h" #include "file/json.h" +#include "file/json_utils.h" #include "dwi/gradient.h" @@ -65,13 +67,23 @@ void usage () + "The command can also write the diffusion gradient table from a single input image to file; " "either in the MRtrix or FSL format (bvecs/bvals file pair; includes appropriate diffusion " - "gradient vector reorientation)"; + "gradient vector reorientation)" + + + "The -dwgrad, -export_* and -shell_* options provide (information about) " + "the diffusion weighting gradient table after it has been processed by " + "the MRtrix3 back-end (vectors normalised, b-values scaled by the square " + "of the vector norm, depending on the -bvalue_scaling option). To see the " + "raw gradient table information as stored in the image header, i.e. without " + "MRtrix3 back-end processing, use \"-property dw_scheme\"." + + + DWI::bvalue_scaling_description; ARGUMENTS + Argument ("image", "the input image(s).").allow_multiple().type_image_in(); OPTIONS + Option ("all", "print all properties, rather than the first and last 2 of each.") + + Option ("name", "print the file system path of the image") + Option ("format", "image file format") + Option ("ndim", "number of image dimensions") + Option ("size", "image size along each axis") @@ -80,27 +92,21 @@ void usage () + Option ("strides", "data strides i.e. order and direction of axes data layout") + Option ("offset", "image intensity offset") + Option ("multiplier", "image intensity multiplier") - + Option ("transform", "the voxel to image transformation") - - + NoRealignOption + + Option ("transform", "the transformation from image coordinates [mm] to scanner / real world coordinates [mm]") + FieldExportOptions - + GradImportOptions - + Option ("raw_dwgrad", - "do not modify the gradient table from what was found in the image headers. This skips the " - "validation steps normally performed within MRtrix applications (i.e. do not verify that " - "the number of entries in the gradient table matches the number of volumes in the image, " - "do not scale b-values by gradient norms, do not normalise gradient vectors)") + + DWI::GradImportOptions() + + DWI::bvalue_scaling_option + GradExportOptions - + Option ("dwgrad", "the diffusion-weighting gradient table, as stored in the header " - "(i.e. 
without any interpretation, scaling of b-values, or normalisation of gradient vectors)") + + Option ("dwgrad", "the diffusion-weighting gradient table, as interpreted by MRtrix3") + Option ("shell_bvalues", "list the average b-value of each shell") + Option ("shell_sizes", "list the number of volumes in each shell") + + Option ("shell_indices", "list the image volumes attributed to each b-value shell") + PhaseEncoding::ExportOptions - + Option ("petable", "print the phase encoding table"); + + Option ("petable", "print the phase encoding table"); } @@ -142,9 +148,9 @@ void print_strides (const Header& header) std::cout << buffer << "\n"; } -void print_shells (const Header& header, const bool shell_bvalues, const bool shell_sizes) +void print_shells (const Eigen::MatrixXd& grad, const bool shell_bvalues, const bool shell_sizes, const bool shell_indices) { - DWI::Shells dwshells (DWI::parse_DW_scheme (header)); + DWI::Shells dwshells (grad); if (shell_bvalues) { for (size_t i = 0; i < dwshells.count(); i++) std::cout << dwshells[i].get_mean() << " "; @@ -155,6 +161,11 @@ void print_shells (const Header& header, const bool shell_bvalues, const bool sh std::cout << dwshells[i].count() << " "; std::cout << "\n"; } + if (shell_indices) { + for (size_t i = 0; i < dwshells.count(); i++) + std::cout << join(dwshells[i].get_volumes(), ",") + " "; + std::cout << "\n"; + } } void print_transform (const Header& header) @@ -190,39 +201,6 @@ void print_properties (const Header& header, const std::string& key, const size_ } } -template -void keyval2json (const Header& header, JSON& json) -{ - for (const auto& kv : header.keyval()) { - // Text entries that in fact contain matrix / vector data will be - // converted to numerical matrices / vectors and written as such - try { - const auto M = parse_matrix (kv.second); - if (M.rows() == 1 && M.cols() == 1) - throw Exception ("Single scalar value rather than a matrix"); - for (ssize_t row = 0; row != M.rows(); ++row) { - vector data (M.cols()); - for (ssize_t i = 0; i != M.cols(); ++i) - data[i] = M (row, i); - if (json.find (kv.first) == json.end()) - json[kv.first] = { data }; - else - json[kv.first].push_back (data); - } - } catch (...) 
{ - if (json.find (kv.first) == json.end()) { - json[kv.first] = kv.second; - } else if (json[kv.first] != kv.second) { - // If the value for this key differs between images, turn the JSON entry into an array - if (json[kv.first].is_array()) - json[kv.first].push_back (kv.second); - else - json[kv.first] = { json[kv.first], kv.second }; - } - } - } -} - void header2json (const Header& header, nlohmann::json& json) { // Capture _all_ header fields, not just the optional key-value pairs @@ -248,7 +226,7 @@ void header2json (const Header& header, nlohmann::json& json) { T(2,0), T(2,1), T(2,2), T(2,3) }, { 0.0, 0.0, 0.0, 1.0 } }; // Load key-value entries into a nested keyval.* member - keyval2json (header, json["keyval"]); + File::JSON::write (header, json["keyval"], header.name()); } @@ -258,7 +236,12 @@ void header2json (const Header& header, nlohmann::json& json) void run () { - auto check_option_group = [](const App::OptionGroup& g) { for (auto o: g) if (get_options (o.id).size()) return true; return false; }; + auto check_option_group = [](const App::OptionGroup& g) { + for (auto o : g) + if (get_options (o.id).size()) + return true; + return false; + }; const bool export_grad = check_option_group (GradExportOptions); const bool export_pe = check_option_group (PhaseEncoding::ExportOptions); @@ -271,9 +254,10 @@ void run () std::unique_ptr json_keyval (get_options ("json_keyval").size() ? new nlohmann::json : nullptr); std::unique_ptr json_all (get_options ("json_all").size() ? new nlohmann::json : nullptr); - if (get_options ("norealign").size()) - Header::do_not_realign_transform = true; + if (json_all && argument.size() > 1) + throw Exception ("Cannot use -json_all option with multiple input images"); + const bool name = get_options("name") .size(); const bool format = get_options("format") .size(); const bool ndim = get_options("ndim") .size(); const bool size = get_options("size") .size(); @@ -287,21 +271,19 @@ void run () const bool dwgrad = get_options("dwgrad") .size(); const bool shell_bvalues = get_options("shell_bvalues") .size(); const bool shell_sizes = get_options("shell_sizes") .size(); - const bool raw_dwgrad = get_options("raw_dwgrad") .size(); + const bool shell_indices = get_options("shell_indices") .size(); const bool petable = get_options("petable") .size(); const bool print_full_header = !(format || ndim || size || spacing || datatype || strides || offset || multiplier || properties.size() || transform || - dwgrad || export_grad || shell_bvalues || shell_sizes || export_pe || petable || + dwgrad || export_grad || shell_bvalues || shell_sizes || shell_indices || + export_pe || petable || json_keyval || json_all); for (size_t i = 0; i < argument.size(); ++i) { - auto header = Header::open (argument[i]); - if (raw_dwgrad) - DWI::set_DW_scheme (header, DWI::get_DW_scheme (header)); - else if (export_grad || check_option_group (GradImportOptions) || dwgrad || shell_bvalues || shell_sizes) - DWI::set_DW_scheme (header, DWI::get_valid_DW_scheme (header, true)); + const auto header = Header::open (argument[i]); + if (name) std::cout << header.name() << "\n"; if (format) std::cout << header.format() << "\n"; if (ndim) std::cout << header.ndim() << "\n"; if (size) print_dimensions (header); @@ -311,18 +293,29 @@ void run () if (offset) std::cout << header.intensity_offset() << "\n"; if (multiplier) std::cout << header.intensity_scale() << "\n"; if (transform) print_transform (header); - if (dwgrad) std::cout << DWI::get_DW_scheme (header) << "\n"; - if (shell_bvalues || 
shell_sizes) print_shells (header, shell_bvalues, shell_sizes); if (petable) std::cout << PhaseEncoding::get_scheme (header) << "\n"; for (size_t n = 0; n < properties.size(); ++n) print_properties (header, properties[n][0]); + Eigen::MatrixXd grad; + if (export_grad || check_option_group (GradImportOptions) || dwgrad || + shell_bvalues || shell_sizes || shell_indices) { + grad = DWI::get_DW_scheme (header, DWI::get_cmdline_bvalue_scaling_behaviour()); + + if (dwgrad) { + Eigen::IOFormat fmt (Eigen::FullPrecision, 0, " ", "\n", "", "", "", ""); + std::cout << grad.format(fmt) << "\n"; + } + if (shell_bvalues || shell_sizes || shell_indices) + print_shells (grad, shell_bvalues, shell_sizes, shell_indices); + } + DWI::export_grad_commandline (header); PhaseEncoding::export_commandline (header); if (json_keyval) - keyval2json (header, *json_keyval); + File::JSON::write (header, *json_keyval, (argument.size() > 1 ? std::string("") : std::string(argument[0]))); if (json_all) header2json (header, *json_all); @@ -344,6 +337,4 @@ void run () File::OFStream out (opt[0][0]); out << json_all->dump(4) << "\n"; } - } - diff --git a/cmd/mrmath.cpp b/cmd/mrmath.cpp index cc8957a6c1..20a15a399d 100644 --- a/cmd/mrmath.cpp +++ b/cmd/mrmath.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "command.h" @@ -58,10 +59,25 @@ void usage () + "Supported operations are:" + "mean, median, sum, product, rms (root-mean-square value), norm (vector 2-norm), var (unbiased variance), " - "std (unbiased standard deviation), min, max, absmax (maximum absolute value), " - "magmax (value with maximum absolute value, preserving its sign)." - - + "See also 'mrcalc' to compute per-voxel operations."; + "std (unbiased standard deviation), min, max, absmax (maximum absolute value), " + "magmax (value with maximum absolute value, preserving its sign)." + + + "This command is used to traverse either along an image axis, or across a " + "set of input images, calculating some statistic from the values along each " + "traversal. If you are seeking to instead perform mathematical calculations " + "that are done independently for each voxel, please see the 'mrcalc' command."; + + EXAMPLES + + Example ("Calculate a 3D volume representing the mean intensity across a 4D image series", + "mrmath 4D.mif mean 3D_mean.mif -axis 3", + "This is a common operation for calculating e.g. the mean value within a " + "specific DWI b-value. 
Note that axis indices start from 0; thus, axes 0, 1 & 2 " + "are the three spatial axes, and axis 3 operates across volumes.") + + + Example ("Generate a Maximum Intensity Projection (MIP) along the inferior-superior direction", + "mrmath input.mif max MIP.mif -axis 2", + "Since a MIP is literally the maximal value along a specific projection direction, " + "axis-aligned MIPs can be generated easily using mrmath with the \'max\' operation."); ARGUMENTS + Argument ("input", "the input image(s).").type_image_in ().allow_multiple() @@ -72,6 +88,9 @@ void usage () + Option ("axis", "perform operation along a specified axis of a single input image") + Argument ("index").type_integer (0) + + Option ("keep_unary_axes", "Keep unary axes in input images prior to calculating the stats. " + "The default is to wipe axes with single elements.") + + DataType::options(); } @@ -175,22 +194,25 @@ class NORM2 { NOMEMALIGN }; +// Welford's algorithm to avoid catastrophic cancellation class Var { NOMEMALIGN public: - Var () : sum (0.0), sum_sqr (0.0), count (0) { } + Var () : delta (0.0), delta2 (0.0), mean (0.0), m2 (0.0), count (0) { } void operator() (value_type val) { if (std::isfinite (val)) { - sum += val; - sum_sqr += Math::pow2 (val); ++count; + delta = val - mean; + mean += delta / count; + delta2 = val - mean; + m2 += delta * delta2; } } value_type result () const { if (count < 2) return NAN; - return (sum_sqr - Math::pow2 (sum) / static_cast (count)) / (static_cast (count) - 1.0); + return m2 / (static_cast (count) - 1.0); } - double sum, sum_sqr; + double delta, delta2, mean, m2; size_t count; }; @@ -400,8 +422,10 @@ void run () header.datatype() = DataType::from_command_line (DataType::Float32); // Wipe any excess unary-dimensional axes - while (header.size (header.ndim() - 1) == 1) - header.ndim() = header.ndim() - 1; + if ( ! get_options ("keep_unary_axes").size() ) { + while (header.size (header.ndim() - 1) == 1) + header.ndim() = header.ndim() - 1; + } // Verify that dimensions of all input images adequately match for (size_t i = 1; i != num_inputs; ++i) { @@ -419,13 +443,9 @@ void run () if (temp.size(axis) != 1) throw Exception ("Image " + path + " has axis with non-unary dimension beyond first input image " + header.name()); } + header.merge_keyval (temp); } - // Wipe any header information that can't be guaranteed to still be accurate - // after applying an operator across multiple images - header.keyval().erase ("dw_scheme"); - PhaseEncoding::clear_scheme (header); - // Instantiate a kernel depending on the operation requested std::unique_ptr kernel; switch (op) { diff --git a/cmd/mrmetric.cpp b/cmd/mrmetric.cpp index 3b0eb497e8..d801b3a0a2 100644 --- a/cmd/mrmetric.cpp +++ b/cmd/mrmetric.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "algo/loop.h" @@ -333,14 +334,8 @@ void run () using ImageTypeM = Header; n_voxels = 0; - vector
headers; Registration::Transform::Rigid transform; - vector> init_transforms; - Eigen::Matrix padding (0.0, 0.0, 0.0, 0.0); - headers.push_back (Header (input1)); - headers.push_back (Header (input2)); - - Header midway_image_header = compute_minimum_average_header (headers, 1, padding, init_transforms); + Header midway_image_header = compute_minimum_average_header (input1, input2); using LinearInterpolatorType1 = Interp::LinearInterp, Interp::LinearInterpProcessingType::Value>; using LinearInterpolatorType2 = Interp::LinearInterp, Interp::LinearInterpProcessingType::Value>; diff --git a/cmd/mrpad.cpp b/cmd/mrpad.cpp deleted file mode 100644 index fe8a1e03b0..0000000000 --- a/cmd/mrpad.cpp +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ - * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * For more details, see http://www.mrtrix.org/ - */ - - -#include "command.h" -#include "image.h" -#include "algo/loop.h" - -using namespace MR; -using namespace App; - - - -void usage () -{ - AUTHOR = "David Raffelt (david.raffelt@florey.edu.au)"; - - SYNOPSIS = "Pad an image to increase the FOV"; - - ARGUMENTS - + Argument ("image_in", - "the image to be padded").type_image_in() - + Argument ("image_out", - "the output path for the resulting padded image").type_image_out(); - - OPTIONS - + Option ("uniform", - "pad the input image by a uniform number of voxels on all sides (in 3D)") - + Argument ("number").type_integer (0) - - + Option ("axis", - "pad the input image along the provided axis (defined by index). 
Lower and upper define " - "the number of voxels to add to the lower and upper bounds of the axis").allow_multiple() - + Argument ("index").type_integer (0, 2) - + Argument ("lower").type_integer (0) - + Argument ("upper").type_integer (0); -} - - -void run () -{ - Header input_header = Header::open (argument[0]); - auto input = input_header.get_image(); - - ssize_t bounds[3][2] = { {0, input_header.size (0) - 1}, - {0, input_header.size (1) - 1}, - {0, input_header.size (2) - 1} }; - - ssize_t padding[3][2] = { {0, 0}, {0, 0}, {0, 0} }; - - auto opt = get_options ("uniform"); - if (opt.size()) { - ssize_t pad = opt[0][0]; - for (size_t axis = 0; axis < 3; axis++) { - padding[axis][0] = pad; - padding[axis][1] = pad; - } - } - - opt = get_options ("axis"); - for (size_t i = 0; i != opt.size(); ++i) { - // Manual padding of axis overrides uniform padding - const size_t axis = opt[i][0]; - padding[axis][0] = opt[i][1]; - padding[axis][1] = opt[i][2]; - } - - Header output_header (input_header); - auto output_transform = input_header.transform(); - for (size_t axis = 0; axis < 3; ++axis) { - output_header.size (axis) = output_header.size(axis) + padding[axis][0] + padding[axis][1]; - output_transform (axis, 3) += (output_transform (axis, 0) * (bounds[0][0] - padding[0][0]) * input_header.spacing (0)) - + (output_transform (axis, 1) * (bounds[1][0] - padding[1][0]) * input_header.spacing (1)) - + (output_transform (axis, 2) * (bounds[2][0] - padding[2][0]) * input_header.spacing (2)); - } - output_header.transform() = output_transform; - auto output = Image::create (argument[1], output_header); - - for (auto l = Loop ("padding image... ", output) (output); l; ++l) { - bool in_bounds = true; - for (size_t axis = 0; axis < 3; ++axis) { - input.index(axis) = output.index(axis) - padding[axis][0]; - if (input.index(axis) < 0 || input.index(axis) >= input_header.size (axis)) - in_bounds = false; - } - if (input.ndim() > 3) - input.index (3) = output.index (3); - if (in_bounds) - output.value() = input.value(); - else - output.value() = 0; - } - -} - diff --git a/cmd/mrregister.cpp b/cmd/mrregister.cpp index e0903d9b34..7e70b25678 100644 --- a/cmd/mrregister.cpp +++ b/cmd/mrregister.cpp @@ -1,36 +1,40 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
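The removed mrpad run() above keeps the existing voxels fixed in scanner space by shifting the header translation back by the number of voxels prepended on each axis, scaled by the voxel spacing and rotated into scanner coordinates. A minimal standalone sketch of that origin update in plain C++ (padded_origin is a hypothetical name; the crop/pad handling earlier in this diff presumably supersedes this command):

// Standalone sketch (plain C++/std only): how padding the lower side of each
// axis shifts the image origin, as in the transform update of the removed
// mrpad run() above. The new origin is the old origin moved by
// -pad_lower[axis] voxels along each voxel direction, scaled by the spacing.
#include <array>
#include <iostream>

using Vec3 = std::array<double, 3>;
using Mat3 = std::array<Vec3, 3>;        // rotation part, row-major: R[row][col]

Vec3 padded_origin (const Mat3& R, const Vec3& spacing,
                    const Vec3& origin, const std::array<int, 3>& pad_lower)
{
  Vec3 out = origin;
  for (int row = 0; row < 3; ++row)
    for (int axis = 0; axis < 3; ++axis)
      out[row] += R[row][axis] * (-pad_lower[axis]) * spacing[axis];
  return out;
}

int main ()
{
  const Mat3 R { Vec3 { 1, 0, 0 }, Vec3 { 0, 1, 0 }, Vec3 { 0, 0, 1 } };   // no rotation
  const Vec3 spacing { 2.0, 2.0, 2.0 };
  const Vec3 origin  { -90.0, -126.0, -72.0 };
  const Vec3 shifted = padded_origin (R, spacing, origin, { 1, 1, 1 });
  // one voxel of padding on each lower side moves the origin back by one spacing
  std::cout << shifted[0] << " " << shifted[1] << " " << shifted[2] << "\n";   // -92 -128 -74
}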
*/ - #include "command.h" #include "image.h" #include "image_helpers.h" #include "filter/reslice.h" #include "interp/cubic.h" #include "transform.h" +#include "registration/multi_contrast.h" #include "registration/linear.h" #include "registration/nonlinear.h" #include "registration/metric/demons.h" #include "registration/metric/mean_squared.h" #include "registration/metric/difference_robust.h" -#include "registration/metric/normalised_cross_correlation.h" +#include "registration/metric/local_cross_correlation.h" #include "registration/transform/affine.h" #include "registration/transform/rigid.h" #include "dwi/directions/predefined.h" #include "math/average_space.h" #include "math/SH.h" #include "math/sphere.h" +#include "transform.h" +#include "file/nifti_utils.h" using namespace MR; @@ -38,6 +42,11 @@ using namespace App; const char* transformation_choices[] = { "rigid", "affine", "nonlinear", "rigid_affine", "rigid_nonlinear", "affine_nonlinear", "rigid_affine_nonlinear", NULL }; +const OptionGroup multiContrastOptions = + OptionGroup ("Multi-contrast options") + + Option ("mc_weights", "relative weight of images used for multi-contrast registration. Default: 1.0 (equal weighting)") + + Argument ("weights").type_sequence_float (); + void usage () { @@ -72,19 +81,21 @@ void usage () ARGUMENTS - + Argument ("image1", "input image 1 ('moving')").type_image_in () - + Argument ("image2", "input image 2 ('template')").type_image_in (); + + Argument ("image1 image2", "input image 1 ('moving') and input image 2 ('template')").type_image_in() + + Argument ("contrast1 contrast2", "optional list of additional input images used as additional contrasts. " + "Can be used multiple times. contrastX and imageX must share the same coordinate system. ").type_image_in().optional().allow_multiple(); OPTIONS + Option ("type", "the registration type. Valid choices are: " "rigid, affine, nonlinear, rigid_affine, rigid_nonlinear, affine_nonlinear, rigid_affine_nonlinear (Default: affine_nonlinear)") + Argument ("choice").type_choice (transformation_choices) - + Option ("transformed", "image1 after registration transformed to the space of image2") + + Option ("transformed", "image1 after registration transformed and regridded to the space of image2. " + "Note that -transformed needs to be repeated for each contrast if multi-contrast registration is used.").allow_multiple() + Argument ("image").type_image_out () - + Option ("transformed_midway", "image1 and image2 after registration transformed " - "to the midway space") + + Option ("transformed_midway", "image1 and image2 after registration transformed and regridded to the midway space. " + "Note that -transformed_midway needs to be repeated for each contrast if multi-contrast registration is used.").allow_multiple() + Argument ("image1_transformed").type_image_out () + Argument ("image2_transformed").type_image_out () @@ -94,6 +105,8 @@ void usage () + Option ("mask2", "a mask to define the region of image2 to use for optimisation.") + Argument ("filename").type_image_in () + + Option("nan", "use NaN as out of bounds value. (Default: 0.0)") + + Registration::rigid_options + Registration::affine_options @@ -106,24 +119,47 @@ void usage () + Registration::fod_options + + multiContrastOptions + + DataType::options(); } using value_type = double; +void run () { + vector
input1, input2; + const size_t n_images = argument.size() / 2; + { // parse arguments and load input headers + if (n_images * 2 != argument.size()) { + std::string err; + for (const auto & a : argument) + err += " " + str(a); + throw Exception ("unexpected number of input images. arguments:" + err); + } -void run () -{ - - Image im1_image = Image::open (argument[0]).with_direct_io (Stride::contiguous_along_axis (3)); - Image im2_image = Image::open (argument[1]).with_direct_io (Stride::contiguous_along_axis (3)); + bool is1 = true; + for (const auto& arg : argument) { + if (is1) + input1.push_back (Header::open (str(arg))); + else + input2.push_back (Header::open (str(arg))); + is1 = !is1; + } + } + assert (input1.size() == n_images); + if (input1.size() != input2.size()) + throw Exception ("require same number of input images for image 1 and image 2"); + + for (size_t i=0; i registration without reorientation. + // will be set to false if registration of all input SH images has lmax==0 + bool do_reorientation = !reorientation_forbidden; Eigen::MatrixXd directions_cartesian; opt = get_options ("directions"); if (opt.size()) directions_cartesian = Math::Sphere::spherical2cartesian (load_matrix (opt[0][0])).transpose(); - int image_lmax = 0; - - if (im1_image.ndim() > 4) { - throw Exception ("image dimensions larger than 4 are not supported"); - } else if (im1_image.ndim() == 3) { - do_reorientation = false; - } else if (im1_image.ndim() == 4) { - if (im1_image.size(3) != im2_image.size(3)) - throw Exception ("input images do not have the same number of volumes in the 4th dimension"); - image_lmax = Math::SH::LforN (im1_image.size(3)); - value_type val = (std::sqrt (float (1 + 8 * im2_image.size(3))) - 3.0) / 4.0; - if (!(val - (int)val) && do_reorientation && im2_image.size(3) > 1) { - CONSOLE ("SH series detected, performing FOD registration"); - do_reorientation = true; - if (!directions_cartesian.cols()) - directions_cartesian = Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_60()).transpose(); - } else { - do_reorientation = false; - if (directions_cartesian.cols()) - WARN ("-directions option ignored since no FOD reorientation is being performed"); + // check header transformations for equality + Eigen::MatrixXd trafo = MR::Transform(input1[0]).scanner2voxel.linear(); + for (size_t i=1; i mc_params (n_images); + for (auto& mc : mc_params) { + mc.do_reorientation = do_reorientation; + } + // set parameters for each contrast + for (size_t i=0; i0) check_dimensions (input1[i], input1[i-1], 0, 3); + if (i>0) check_dimensions (input2[i], input2[i-1], 0, 3); + if ((input1[i].ndim() != 3) and (input1[i].ndim() != 4)) + throw Exception ("image dimensionality other than 3 or 4 are not supported. image " + + str(input1[i].name()) + " is " + str(input1[i].ndim()) + " dimensional"); + + const size_t nvols1 = input1[i].ndim() == 3 ? 1 : input1[i].size(3); + const size_t nvols2 = input2[i].ndim() == 3 ? 
1 : input2[i].size(3); + if (nvols1 != nvols2) + throw Exception ("input images do not have the same number of volumes: " + str(input2[i].name()) + " and " + str(input1[i].name())); + + // set do_reorientation and image_lmax + if (nvols1 == 1) { // 3D or one volume + mc_params[i].do_reorientation = false; + mc_params[i].image_lmax = 0; + CONSOLE ("3D input pair "+input1[i].name()+", "+input2[i].name()); + } else { // more than one volume + if (do_reorientation && nvols1 > 1 && SH::NforL(SH::LforN(nvols1)) == nvols1) { + CONSOLE ("SH image input pair "+input1[i].name()+", "+input2[i].name()); + mc_params[i].do_reorientation = true; + mc_params[i].image_lmax = Math::SH::LforN (nvols1); + if (!directions_cartesian.cols()) + directions_cartesian = Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_60()).transpose(); + } else { + CONSOLE ("4D scalar input pair "+input1[i].name()+", "+input2[i].name()); + mc_params[i].do_reorientation = false; + mc_params[i].image_lmax = 0; + } } + // set lmax to image_lmax and set image_nvols + mc_params[i].lmax = mc_params[i].image_lmax; + mc_params[i].image_nvols = input1[i].ndim() < 4 ? 1 : input1[i].size(3); } + ssize_t max_mc_image_lmax = std::max_element(mc_params.begin(), mc_params.end(), + [](const Registration::MultiContrastSetting& x, const Registration::MultiContrastSetting& y) + {return x.lmax < y.lmax;})->lmax; + + do_reorientation = std::any_of(mc_params.begin(), mc_params.end(), + [](Registration::MultiContrastSetting const& i){return i.do_reorientation;}); + if (do_reorientation) + CONSOLE("performing FOD registration"); + if (!do_reorientation and directions_cartesian.cols()) + WARN ("-directions option ignored since no FOD reorientation is being performed"); + + INFO ("maximum input lmax: "+str(max_mc_image_lmax)); opt = get_options ("transformed"); - Image im1_transformed; - if (opt.size()){ - Header transformed_header (im2_image); - transformed_header.datatype() = DataType::from_command_line (DataType::Float32); - im1_transformed = Image::create (opt[0][0], transformed_header).with_direct_io(); + vector im1_transformed_paths; + if (opt.size()) { + if (opt.size() > n_images) + throw Exception ("number of -transformed images exceeds number of contrasts"); + if (opt.size() != n_images) + WARN ("number of -transformed images lower than number of contrasts"); + for (size_t c = 0; c < opt.size(); c++) { + Registration::check_image_output (opt[c][0], input2[c]); + im1_transformed_paths.push_back(opt[c][0]); + INFO (input1[c].name() + ", transformed to space of image2, will be written to " + im1_transformed_paths[c]); + } } - std::string im1_midway_transformed_path; - std::string im2_midway_transformed_path; + + vector input1_midway_transformed_paths; + vector input2_midway_transformed_paths; opt = get_options ("transformed_midway"); - if (opt.size()){ - im1_midway_transformed_path = str(opt[0][0]); - im2_midway_transformed_path = str(opt[0][1]); + if (opt.size()) { + if (opt.size() > n_images) + throw Exception ("number of -transformed_midway images exceeds number of contrasts"); + if (opt.size() != n_images) + WARN ("number of -transformed_midway images lower than number of contrasts"); + for (size_t c = 0; c < opt.size(); c++) { + Registration::check_image_output (opt[c][0], input2[c]); + input1_midway_transformed_paths.push_back(opt[c][0]); + INFO (input1[c].name() + ", transformed to midway space, will be written to " + input1_midway_transformed_paths[c]); + Registration::check_image_output (opt[c][1], input1[c]); + 
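In the mrregister multi-contrast setup above, each input pair is classified as an SH (FOD) series purely from its volume count, via the SH::NforL (SH::LforN (nvols)) == nvols test. The following standalone sketch shows that test under the usual even-order real-SH convention; nfor_l and lfor_n are hypothetical stand-ins for the MRtrix helpers, and the convention itself is an assumption stated in the code.

// Standalone sketch (plain C++): the volume-count test used above to decide
// whether a 4D input is an SH (FOD) series. Assumes the usual even-order,
// real SH convention where an lmax series stores (lmax+1)(lmax+2)/2 volumes.
#include <cmath>
#include <iostream>

int nfor_l (int lmax)            // number of even-order SH coefficients up to lmax
{
  return (lmax + 1) * (lmax + 2) / 2;
}

int lfor_n (int nvols)           // largest even lmax whose series fits in nvols
{
  const int l = int (std::floor ((std::sqrt (1.0 + 8.0 * nvols) - 3.0) / 2.0));
  return l - (l % 2);            // round down to even
}

bool looks_like_sh_series (int nvols)
{
  return nvols > 1 && nfor_l (lfor_n (nvols)) == nvols;
}

int main ()
{
  const int counts[] = { 1, 6, 15, 28, 45, 46 };
  for (int n : counts)
    std::cout << n << " volumes -> " << (looks_like_sh_series (n) ? "SH" : "not SH") << "\n";
  // 6, 15, 28, 45 correspond to lmax 2, 4, 6, 8; 46 does not match any even lmax
}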
input2_midway_transformed_paths.push_back(opt[c][1]); + INFO (input2[c].name() + ", transformed to midway space, will be written to " + input2_midway_transformed_paths[c]); + } } opt = get_options ("mask1"); Image im1_mask; if (opt.size ()) { im1_mask = Image::open(opt[0][0]); - check_dimensions (im1_image, im1_mask, 0, 3); + check_dimensions (input1[0], im1_mask, 0, 3); } - opt = get_options ("mask2"); Image im2_mask; if (opt.size ()) { im2_mask = Image::open(opt[0][0]); - check_dimensions (im2_image, im2_mask, 0, 3); + check_dimensions (input2[0], im2_mask, 0, 3); } + // Out of bounds value + value_type out_of_bounds_value = 0.0; + opt = get_options ("nan"); + if (opt.size()) + out_of_bounds_value = NAN; + // ****** RIGID REGISTRATION OPTIONS ******* Registration::Linear rigid_registration; @@ -267,9 +368,15 @@ void run () bool init_rigid_matrix_set = false; if (opt.size()) { init_rigid_matrix_set = true; - transform_type rigid_transform = load_transform (opt[0][0]); + Eigen::Vector3 centre; + transform_type rigid_transform = load_transform (opt[0][0], centre); rigid.set_transform (rigid_transform); - rigid_registration.set_init_translation_type (Registration::Transform::Init::set_centre_mass); + if (!std::isfinite(centre(0))) { + rigid_registration.set_init_translation_type (Registration::Transform::Init::set_centre_mass); + } else { + rigid.set_centre_without_transform_update(centre); + rigid_registration.set_init_translation_type (Registration::Transform::Init::none); + } } opt = get_options ("rigid_init_translation"); @@ -281,8 +388,6 @@ void run () opt = get_options ("rigid_init_rotation"); if (opt.size()) { - if (init_rigid_matrix_set) - throw Exception ("options -rigid_init_matrix and -rigid_init_rotation are mutually exclusive"); Registration::set_init_rotation_model_from_option (rigid_registration, (int)opt[0][0]); } @@ -293,13 +398,6 @@ void run () rigid_registration.set_scale_factor (parse_floats (opt[0][0])); } - // opt = get_options ("rigid_stage.iterations"); - // if (opt.size ()) { - // if (!do_rigid) - // throw Exception ("the rigid iterations were input when no rigid registration is requested"); - // rigid_registration.set_stage_iterations (parse_ints (opt[0][0])); - // } - opt = get_options ("rigid_loop_density"); if (opt.size ()) { if (!do_rigid) @@ -357,13 +455,15 @@ void run () if (opt.size ()) { if (!do_rigid) throw Exception ("the -rigid_lmax option has been set when no rigid registration is requested"); - if (im1_image.ndim() < 4) - throw Exception ("-rigid_lmax option is not valid with 3D images"); + if (max_mc_image_lmax == 0) + throw Exception ("-rigid_lmax option is not valid if no input image is FOD image"); rigid_lmax = parse_ints (opt[0][0]); - rigid_registration.set_lmax (rigid_lmax); for (size_t i = 0; i < rigid_lmax.size (); ++i) - if (rigid_lmax[i] > image_lmax) - throw Exception ("the requested -rigid_lmax exceeds the lmax of the input images"); + if (rigid_lmax[i] > max_mc_image_lmax) { + WARN ("the requested -rigid_lmax exceeds the lmax of the input images, setting it to " + str(max_mc_image_lmax)); + rigid_lmax[i] = max_mc_image_lmax; + } + rigid_registration.set_lmax (rigid_lmax); } std::ofstream linear_logstream; @@ -374,6 +474,8 @@ void run () linear_logstream.open (opt[0][0]); rigid_registration.set_log_stream (linear_logstream.rdbuf()); } + + // ****** AFFINE REGISTRATION OPTIONS ******* Registration::Linear affine_registration; opt = get_options ("affine"); @@ -416,9 +518,15 @@ void run () throw Exception ("you cannot initialise with 
-affine_init_matrix since a rigid registration is being performed"); init_affine_matrix_set = true; - transform_type init_affine = load_transform (opt[0][0]); - affine.set_transform (init_affine); - affine_registration.set_init_translation_type (Registration::Transform::Init::set_centre_mass); + Eigen::Vector3 centre; + transform_type affine_transform = load_transform (opt[0][0], centre); + affine.set_transform (affine_transform); + if (!std::isfinite(centre(0))) { + affine_registration.set_init_translation_type (Registration::Transform::Init::set_centre_mass); + } else { + affine.set_centre_without_transform_update(centre); + affine_registration.set_init_translation_type (Registration::Transform::Init::none); + } } opt = get_options ("affine_init_translation"); @@ -442,13 +550,6 @@ void run () affine_registration.set_scale_factor (parse_floats (opt[0][0])); } - // opt = get_options ("affine_stage.iterations"); - // if (opt.size ()) { - // if (!do_affine) - // throw Exception ("the affine repetition factors were input when no affine registration is requested"); - // affine_registration.set_stage_iterations (parse_ints (opt[0][0])); - // } - opt = get_options ("affine_loop_density"); if (opt.size ()) { if (!do_affine) @@ -506,13 +607,15 @@ void run () if (opt.size ()) { if (!do_affine) throw Exception ("the -affine_lmax option has been set when no affine registration is requested"); - if (im1_image.ndim() < 4) - throw Exception ("-affine_lmax option is not valid with 3D images"); + if (max_mc_image_lmax == 0) + throw Exception ("-affine_lmax option is not valid if no input image is FOD image"); affine_lmax = parse_ints (opt[0][0]); - affine_registration.set_lmax (affine_lmax); for (size_t i = 0; i < affine_lmax.size (); ++i) - if (affine_lmax[i] > image_lmax) - throw Exception ("the requested -affine_lmax exceeds the lmax of the input images"); + if (affine_lmax[i] > max_mc_image_lmax) { + WARN ("the requested -affine_lmax exceeds the lmax of the input images, setting it to " + str(max_mc_image_lmax)); + affine_lmax[i] = max_mc_image_lmax; + } + affine_registration.set_lmax (affine_lmax); } opt = get_options ("affine_log"); @@ -565,6 +668,9 @@ void run () if (!do_nonlinear) throw Exception ("Non-linear warp output requested when no non-linear registration is requested"); warp_full_filename = std::string (opt[0][0]); + if (!Path::is_mrtrix_image (warp_full_filename) && !(Path::has_suffix (warp_full_filename, {".nii", ".nii.gz"}) && + File::Config::get_bool ("NIfTIAutoSaveJSON", false))) + throw Exception ("nl_warp_full output requires .mif/.mih or NIfTI file format with NIfTIAutoSaveJSON config option set."); } @@ -576,6 +682,12 @@ void run () if (!do_nonlinear) throw Exception ("the non linear initialisation option -nl_init cannot be used when no non linear registration is requested"); + if (!Path::is_mrtrix_image (opt[0][0]) && !(Path::has_suffix (opt[0][0], {".nii", ".nii.gz"}) && + File::Config::get_bool ("NIfTIAutoLoadJSON", false) && + Path::exists(File::NIfTI::get_json_path(opt[0][0])))) + WARN ("nl_init input requires warp_full in original .mif/.mih file format or in NIfTI file format with associated JSON. " + "Converting to other file formats may remove linear transformations stored in the image header."); + Image input_warps = Image::open (opt[0][0]); if (input_warps.ndim() != 5) throw Exception ("non-linear initialisation input is not 5D. 
Input must be from previous non-linear output"); @@ -643,81 +755,147 @@ void run () opt = get_options ("nl_lmax"); vector nl_lmax; - if (opt.size ()) { + if (opt.size()) { if (!do_nonlinear) throw Exception ("the -nl_lmax option has been set when no non-linear registration is requested"); - if (im1_image.ndim() < 4) + if (input1[0].ndim() < 4) throw Exception ("-nl_lmax option is not valid with 3D images"); nl_lmax = parse_ints (opt[0][0]); nl_registration.set_lmax (nl_lmax); for (size_t i = 0; i < (nl_lmax).size (); ++i) - if ((nl_lmax)[i] > image_lmax) + if ((nl_lmax)[i] > max_mc_image_lmax) throw Exception ("the requested -nl_lmax exceeds the lmax of the input images"); } + // ****** MC options ******* + // TODO: set tissue specific lmax? + + opt = get_options ("mc_weights"); + if (opt.size()) { + vector mc_weights = parse_floats (opt[0][0]); + if (mc_weights.size() == 1) + mc_weights.resize (n_images, mc_weights[0]); + else if (mc_weights.size() != n_images) + throw Exception ("number of mc_weights does not match number of contrasts"); + for (const default_type & w : mc_weights) + if (w < 0.0) throw Exception ("mc_weights must be non-negative"); + + if (do_nonlinear) { + default_type sm = 0.0; + std::for_each (mc_weights.begin(), mc_weights.end(), [&] (default_type n) {sm += n;}); + if (MR::abs (sm - n_images) > 1.e-6) + WARN ("mc_weights do not sum to the number of contrasts. This changes the regularisation of the nonlinear registration."); + } + + for (size_t idx = 0; idx < n_images; idx++) + mc_params[idx].weight = mc_weights[idx]; + } + + { + ssize_t max_requested_lmax = 0; + if (max_mc_image_lmax != 0) { + if (do_rigid) max_requested_lmax = std::max(max_requested_lmax, rigid_registration.get_lmax()); + if (do_affine) max_requested_lmax = std::max(max_requested_lmax, affine_registration.get_lmax()); + if (do_nonlinear) max_requested_lmax = std::max(max_requested_lmax, nl_registration.get_lmax()); + INFO ("maximum used lmax: "+str(max_requested_lmax)); + } + + for (size_t idx = 0; idx < n_images; ++idx) { + mc_params[idx].lmax = std::min (mc_params[idx].image_lmax, max_requested_lmax); + if (input1[idx].ndim() == 3) + mc_params[idx].nvols = 1; + else if (mc_params[idx].do_reorientation) { + mc_params[idx].nvols = Math::SH::NforL (mc_params[idx].lmax); + } else + mc_params[idx].nvols = input1[idx].size(3); + } + mc_params[0].start = 0; + for (size_t idx = 1; idx < n_images; ++idx) + mc_params[idx].start = mc_params[idx-1].start + mc_params[idx-1].nvols; + + for (const auto & mc : mc_params) + DEBUG (str(mc)); + } + + if (mc_params.size() > 1) { + if (do_rigid) rigid_registration.set_mc_parameters (mc_params); + if (do_affine) affine_registration.set_mc_parameters (mc_params); + if (do_nonlinear) nl_registration.set_mc_parameters (mc_params); + } + + // ****** PARSING DONE, PRELOAD THE DATA ******* + // only load the volumes we actually need for the highest lmax requested + // load multiple tissue types into the same 4D image + // drop last axis if input is 4D with one volume for speed reasons + Image images1, images2; + INFO ("preloading input1..."); + Registration::preload_data (input1, images1, mc_params); + INFO ("preloading input2..."); + Registration::preload_data (input2, images2, mc_params); + INFO ("preloading input images done"); // ****** RUN RIGID REGISTRATION ******* if (do_rigid) { CONSOLE ("running rigid registration"); - if (im2_image.ndim() == 4) { + if (images2.ndim() == 4) { if (do_reorientation) rigid_registration.set_directions (directions_cartesian); // if 
(rigid_metric == Registration::NCC) // TODO if (rigid_metric == Registration::Diff) { if (rigid_estimator == Registration::None) { Registration::Metric::MeanSquared4D, Image> metric; - rigid_registration.run_masked (metric, rigid, im1_image, im2_image, im1_mask, im2_mask); + rigid_registration.run_masked (metric, rigid, images1, images2, im1_mask, im2_mask); } else if (rigid_estimator == Registration::L1) { Registration::Metric::L1 estimator; - Registration::Metric::DifferenceRobust4D, Image, Registration::Metric::L1> metric (im1_image, im2_image, estimator); - rigid_registration.run_masked (metric, rigid, im1_image, im2_image, im1_mask, im2_mask); + Registration::Metric::DifferenceRobust4D, Image, Registration::Metric::L1> metric (images1, images2, estimator); + rigid_registration.run_masked (metric, rigid, images1, images2, im1_mask, im2_mask); } else if (rigid_estimator == Registration::L2) { Registration::Metric::L2 estimator; - Registration::Metric::DifferenceRobust4D, Image, Registration::Metric::L2> metric (im1_image, im2_image, estimator); - rigid_registration.run_masked (metric, rigid, im1_image, im2_image, im1_mask, im2_mask); + Registration::Metric::DifferenceRobust4D, Image, Registration::Metric::L2> metric (images1, images2, estimator); + rigid_registration.run_masked (metric, rigid, images1, images2, im1_mask, im2_mask); } else if (rigid_estimator == Registration::LP) { Registration::Metric::LP estimator; - Registration::Metric::DifferenceRobust4D, Image, Registration::Metric::LP> metric (im1_image, im2_image, estimator); - rigid_registration.run_masked (metric, rigid, im1_image, im2_image, im1_mask, im2_mask); + Registration::Metric::DifferenceRobust4D, Image, Registration::Metric::LP> metric (images1, images2, estimator); + rigid_registration.run_masked (metric, rigid, images1, images2, im1_mask, im2_mask); } else throw Exception ("FIXME: estimator selection"); } else throw Exception ("FIXME: metric selection"); } else { // 3D if (rigid_metric == Registration::NCC){ - Registration::Metric::NormalisedCrossCorrelation metric; + Registration::Metric::LocalCrossCorrelation metric; vector extent(3,3); rigid_registration.set_extent (extent); - rigid_registration.run_masked (metric, rigid, im1_image, im2_image, im1_mask, im2_mask); + rigid_registration.run_masked (metric, rigid, images1, images2, im1_mask, im2_mask); } else if (rigid_metric == Registration::Diff) { if (rigid_estimator == Registration::None) { Registration::Metric::MeanSquared metric; - rigid_registration.run_masked (metric, rigid, im1_image, im2_image, im1_mask, im2_mask); + rigid_registration.run_masked (metric, rigid, images1, images2, im1_mask, im2_mask); } else if (rigid_estimator == Registration::L1) { Registration::Metric::L1 estimator; Registration::Metric::DifferenceRobust metric(estimator); - rigid_registration.run_masked (metric, rigid, im1_image, im2_image, im1_mask, im2_mask); + rigid_registration.run_masked (metric, rigid, images1, images2, im1_mask, im2_mask); } else if (rigid_estimator == Registration::L2) { Registration::Metric::L2 estimator; Registration::Metric::DifferenceRobust metric(estimator); - rigid_registration.run_masked (metric, rigid, im1_image, im2_image, im1_mask, im2_mask); + rigid_registration.run_masked (metric, rigid, images1, images2, im1_mask, im2_mask); } else if (rigid_estimator == Registration::LP) { Registration::Metric::LP estimator; Registration::Metric::DifferenceRobust metric(estimator); - rigid_registration.run_masked (metric, rigid, im1_image, im2_image, im1_mask, 
im2_mask); + rigid_registration.run_masked (metric, rigid, images1, images2, im1_mask, im2_mask); } else throw Exception ("FIXME: estimator selection"); } else throw Exception ("FIXME: metric selection"); } if (output_rigid_1tomid) - save_transform (rigid.get_transform_half(), rigid_1tomid_filename); + save_transform (rigid.get_transform_half(), rigid.get_centre(), rigid_1tomid_filename); if (output_rigid_2tomid) - save_transform (rigid.get_transform_half_inverse(), rigid_2tomid_filename); + save_transform (rigid.get_transform_half_inverse(), rigid.get_centre(), rigid_2tomid_filename); if (output_rigid) - save_transform (rigid.get_transform(), rigid_filename); + save_transform (rigid.get_transform(), rigid.get_centre(), rigid_filename); } // ****** RUN AFFINE REGISTRATION ******* @@ -730,64 +908,62 @@ void run () affine_registration.set_init_translation_type (Registration::Transform::Init::none); } - - if (im2_image.ndim() == 4) { + if (images2.ndim() == 4) { if (do_reorientation) affine_registration.set_directions (directions_cartesian); // if (affine_metric == Registration::NCC) // TODO if (affine_metric == Registration::Diff) { if (affine_estimator == Registration::None) { Registration::Metric::MeanSquared4D, Image> metric; - affine_registration.run_masked (metric, affine, im1_image, im2_image, im1_mask, im2_mask); + affine_registration.run_masked (metric, affine, images1, images2, im1_mask, im2_mask); } else if (affine_estimator == Registration::L1) { Registration::Metric::L1 estimator; - Registration::Metric::DifferenceRobust4D, Image, Registration::Metric::L1> metric (im1_image, im2_image, estimator); - affine_registration.run_masked (metric, affine, im1_image, im2_image, im1_mask, im2_mask); + Registration::Metric::DifferenceRobust4D, Image, Registration::Metric::L1> metric (images1, images2, estimator); + affine_registration.run_masked (metric, affine, images1, images2, im1_mask, im2_mask); } else if (affine_estimator == Registration::L2) { Registration::Metric::L2 estimator; - Registration::Metric::DifferenceRobust4D, Image, Registration::Metric::L2> metric (im1_image, im2_image, estimator); - affine_registration.run_masked (metric, affine, im1_image, im2_image, im1_mask, im2_mask); + Registration::Metric::DifferenceRobust4D, Image, Registration::Metric::L2> metric (images1, images2, estimator); + affine_registration.run_masked (metric, affine, images1, images2, im1_mask, im2_mask); } else if (affine_estimator == Registration::LP) { Registration::Metric::LP estimator; - Registration::Metric::DifferenceRobust4D, Image, Registration::Metric::LP> metric (im1_image, im2_image, estimator); - affine_registration.run_masked (metric, affine, im1_image, im2_image, im1_mask, im2_mask); + Registration::Metric::DifferenceRobust4D, Image, Registration::Metric::LP> metric (images1, images2, estimator); + affine_registration.run_masked (metric, affine, images1, images2, im1_mask, im2_mask); } else throw Exception ("FIXME: estimator selection"); } else throw Exception ("FIXME: metric selection"); } else { // 3D if (affine_metric == Registration::NCC){ - Registration::Metric::NormalisedCrossCorrelation metric; + Registration::Metric::LocalCrossCorrelation metric; vector extent(3,3); affine_registration.set_extent (extent); - affine_registration.run_masked (metric, affine, im1_image, im2_image, im1_mask, im2_mask); + affine_registration.run_masked (metric, affine, images1, images2, im1_mask, im2_mask); } else if (affine_metric == Registration::Diff) { if (affine_estimator == Registration::None) { 
Registration::Metric::MeanSquared metric; - affine_registration.run_masked (metric, affine, im1_image, im2_image, im1_mask, im2_mask); + affine_registration.run_masked (metric, affine, images1, images2, im1_mask, im2_mask); } else if (affine_estimator == Registration::L1) { Registration::Metric::L1 estimator; Registration::Metric::DifferenceRobust metric(estimator); - affine_registration.run_masked (metric, affine, im1_image, im2_image, im1_mask, im2_mask); + affine_registration.run_masked (metric, affine, images1, images2, im1_mask, im2_mask); } else if (affine_estimator == Registration::L2) { Registration::Metric::L2 estimator; Registration::Metric::DifferenceRobust metric(estimator); - affine_registration.run_masked (metric, affine, im1_image, im2_image, im1_mask, im2_mask); + affine_registration.run_masked (metric, affine, images1, images2, im1_mask, im2_mask); } else if (affine_estimator == Registration::LP) { Registration::Metric::LP estimator; Registration::Metric::DifferenceRobust metric(estimator); - affine_registration.run_masked (metric, affine, im1_image, im2_image, im1_mask, im2_mask); + affine_registration.run_masked (metric, affine, images1, images2, im1_mask, im2_mask); } else throw Exception ("FIXME: estimator selection"); } else throw Exception ("FIXME: metric selection"); } - - if (output_affine) - save_transform (affine.get_transform(), affine_filename); - if (output_affine_1tomid) - save_transform (affine.get_transform_half(), affine_1tomid_filename); + save_transform (affine.get_transform_half(), affine.get_centre(), affine_1tomid_filename); if (output_affine_2tomid) - save_transform (affine.get_transform_half_inverse(), affine_2tomid_filename); + save_transform (affine.get_transform_half_inverse(), affine.get_centre(), affine_2tomid_filename); + + if (output_affine) + save_transform (affine.get_transform(), affine.get_centre(), affine_filename); } @@ -799,14 +975,13 @@ void run () nl_registration.set_aPSF_directions (directions_cartesian); if (do_affine || init_affine_matrix_set) { - nl_registration.run (affine, im1_image, im2_image, im1_mask, im2_mask); + nl_registration.run (affine, images1, images2, im1_mask, im2_mask); } else if (do_rigid || init_rigid_matrix_set) { - nl_registration.run (rigid, im1_image, im2_image, im1_mask, im2_mask); + nl_registration.run (rigid, images1, images2, im1_mask, im2_mask); } else { Registration::Transform::Affine identity_transform; - nl_registration.run (identity_transform, im1_image, im2_image, im1_mask, im2_mask); + nl_registration.run (identity_transform, images1, images2, im1_mask, im2_mask); } - if (warp_full_filename.size()) { //TODO add affine parameters to comments too? 
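Returning to the -mc_weights handling earlier in this run(): a single value is broadcast to every contrast, negative weights are rejected, and a warning is issued when the weights do not sum to the number of contrasts, since that changes the effective regularisation of the nonlinear stage. A minimal standalone sketch of that bookkeeping in plain C++ (resolve_mc_weights is a hypothetical name):

// Standalone sketch (plain C++): broadcast a single weight to all contrasts,
// reject negative weights, and warn when the weights do not sum to the number
// of contrasts, mirroring the -mc_weights checks shown earlier in this hunk.
#include <cmath>
#include <iostream>
#include <numeric>
#include <stdexcept>
#include <vector>

std::vector<double> resolve_mc_weights (std::vector<double> weights, size_t n_contrasts)
{
  if (weights.size() == 1)
    weights.resize (n_contrasts, weights[0]);            // equal weighting shorthand
  else if (weights.size() != n_contrasts)
    throw std::runtime_error ("number of weights does not match number of contrasts");
  for (double w : weights)
    if (w < 0.0)
      throw std::runtime_error ("weights must be non-negative");
  const double sum = std::accumulate (weights.begin(), weights.end(), 0.0);
  if (std::abs (sum - double (n_contrasts)) > 1e-6)
    std::cerr << "[WARN] weights do not sum to the number of contrasts\n";
  return weights;
}

int main ()
{
  const auto w = resolve_mc_weights ({ 2.0 }, 3);        // -> { 2, 2, 2 }, with a warning
  for (double v : w) std::cout << v << " ";
  std::cout << "\n";
}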
Header output_header = nl_registration.get_output_warps_header(); @@ -818,7 +993,7 @@ void run () } if (warp1_filename.size()) { - Header output_header (im2_image); + Header output_header (images2); output_header.ndim() = 4; output_header.size(3) =3; nl_registration.write_params_to_header (output_header); @@ -831,7 +1006,7 @@ void run () } if (warp2_filename.size()) { - Header output_header (im1_image); + Header output_header (images1); output_header.ndim() = 4; output_header.size(3) = 3; nl_registration.write_params_to_header (output_header); @@ -845,77 +1020,152 @@ void run () } + if (im1_transformed_paths.size()) { + CONSOLE ("Writing input images1 transformed to space of images2..."); - if (im1_transformed.valid()) { - INFO ("Outputting tranformed input images..."); - + Image deform_field; if (do_nonlinear) { - Header deform_header (im1_transformed); + Header deform_header (input2[0]); deform_header.ndim() = 4; deform_header.size(3) = 3; - Image deform_field = Image::scratch (deform_header); + deform_field = Image::scratch (deform_header); Registration::Warp::compute_full_deformation (nl_registration.get_im2_to_mid_linear().inverse(), *(nl_registration.get_mid_to_im2()), *(nl_registration.get_im1_to_mid()), nl_registration.get_im1_to_mid_linear(), deform_field); + } - Filter::warp (im1_image, im1_transformed, deform_field, 0.0); - if (do_reorientation) - Registration::Transform::reorient_warp ("reorienting FODs", - im1_transformed, - deform_field, - Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); - - } else if (do_affine) { - Filter::reslice (im1_image, im1_transformed, affine.get_transform(), Adapter::AutoOverSample, 0.0); - if (do_reorientation) - Registration::Transform::reorient ("reorienting FODs", - im1_transformed, - im1_transformed, - affine.get_transform(), - Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); - } else { // rigid - Filter::reslice (im1_image, im1_transformed, rigid.get_transform(), Adapter::AutoOverSample, 0.0); - if (do_reorientation) - Registration::Transform::reorient ("reorienting FODs", - im1_transformed, - im1_transformed, - rigid.get_transform(), - Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); + for (size_t idx = 0; idx < im1_transformed_paths.size(); idx++) { + CONSOLE ("... " + im1_transformed_paths[idx]); + { + // LogLevelLatch log_level (0); + Image im1_image = Image::open (input1[idx].name()); + + Header transformed_header (input2[idx]); + transformed_header.datatype() = DataType::from_command_line (DataType::Float32); + Image im1_transformed = Image::create (im1_transformed_paths[idx], transformed_header); + + const size_t nvols = im1_image.ndim() == 3 ? 
1 : im1_image.size(3); + const bool reorient_output = !reorientation_forbidden && (nvols > 1) && SH::NforL(SH::LforN(nvols)) == nvols; + + if (do_nonlinear) { + Filter::warp (im1_image, im1_transformed, deform_field, out_of_bounds_value); + if (reorient_output) + Registration::Transform::reorient_warp ("reorienting FODs", + im1_transformed, + deform_field, + Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); + } else if (do_affine) { + Filter::reslice (im1_image, im1_transformed, affine.get_transform(), Adapter::AutoOverSample, out_of_bounds_value); + if (reorient_output) + Registration::Transform::reorient ("reorienting FODs", + im1_transformed, + im1_transformed, + affine.get_transform(), + Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); + } else { // rigid + Filter::reslice (im1_image, im1_transformed, rigid.get_transform(), Adapter::AutoOverSample, out_of_bounds_value); + if (reorient_output) + Registration::Transform::reorient ("reorienting FODs", + im1_transformed, + im1_transformed, + rigid.get_transform(), + Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); + } + } } } - if (!im1_midway_transformed_path.empty() and !im2_midway_transformed_path.empty()) { - if (do_nonlinear) { - Header midway_header (*nl_registration.get_im1_to_mid()); - midway_header.datatype() = DataType::from_command_line (DataType::Float32); - midway_header.ndim() = im1_image.ndim(); - if (midway_header.ndim() == 4) - midway_header.size(3) = im1_image.size(3); + if (input1_midway_transformed_paths.size() and input2_midway_transformed_paths.size()) { + Header midway_header; + Image im1_deform_field, im2_deform_field; - Image im1_deform_field = Image::scratch (*(nl_registration.get_im1_to_mid())); + if (do_nonlinear) + midway_header = Header (*nl_registration.get_im1_to_mid()); + else if (do_affine) + midway_header = compute_minimum_average_header (input1[0], input2[0], affine.get_transform_half_inverse(), affine.get_transform_half()); + else // rigid + midway_header = compute_minimum_average_header (input1[0], input2[0], rigid.get_transform_half_inverse(), rigid.get_transform_half()); + midway_header.datatype() = DataType::from_command_line (DataType::Float32); + + // process input1 then input2 to reduce memory consumption + CONSOLE ("Writing input1 transformed to midway..."); + if (do_nonlinear) { + im1_deform_field = Image::scratch (*(nl_registration.get_im1_to_mid())); Registration::Warp::compose_linear_deformation (nl_registration.get_im1_to_mid_linear(), *(nl_registration.get_im1_to_mid()), im1_deform_field); + } - auto im1_midway = Image::create (im1_midway_transformed_path, midway_header).with_direct_io(); - Filter::warp (im1_image, im1_midway, im1_deform_field, 0.0); - if (do_reorientation) - Registration::Transform::reorient_warp ("reorienting FODs", im1_midway, im1_deform_field, - Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); + for (size_t idx = 0; idx < input1_midway_transformed_paths.size(); idx++) { + CONSOLE ("... " + input1_midway_transformed_paths[idx]); + { + // LogLevelLatch log_level (0); + Image im1_image = Image::open (input1[idx].name()); + midway_header.ndim() = im1_image.ndim(); + if (midway_header.ndim() == 4) + midway_header.size(3) = im1_image.size(3); + + const size_t nvols = im1_image.ndim() == 3 ? 
1 : im1_image.size(3); + const bool reorient_output = !reorientation_forbidden && (nvols > 1) && SH::NforL(SH::LforN(nvols)) == nvols; + + if (do_nonlinear) { + auto im1_midway = Image::create (input1_midway_transformed_paths[idx], midway_header); + Filter::warp (im1_image, im1_midway, im1_deform_field, out_of_bounds_value); + if (reorient_output) + Registration::Transform::reorient_warp ("reorienting ODFs", im1_midway, im1_deform_field, + Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); + } else if (do_affine) { + auto im1_midway = Image::create (input1_midway_transformed_paths[idx], midway_header); + Filter::reslice (im1_image, im1_midway, affine.get_transform_half(), Adapter::AutoOverSample, out_of_bounds_value); + if (reorient_output) + Registration::Transform::reorient ("reorienting ODFs", im1_midway, im1_midway, affine.get_transform_half(), Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); + } else { // rigid + auto im1_midway = Image::create (input1_midway_transformed_paths[idx], midway_header); + Filter::reslice (im1_image, im1_midway, rigid.get_transform_half(), Adapter::AutoOverSample, out_of_bounds_value); + if (reorient_output) + Registration::Transform::reorient ("reorienting ODFs", im1_midway, im1_midway, rigid.get_transform_half(), Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); + } + } + } - Image im2_deform_field = Image::scratch (*(nl_registration.get_im2_to_mid())); + CONSOLE ("Writing input2 transformed to midway..."); + if (do_nonlinear) { + im2_deform_field = Image::scratch (*(nl_registration.get_im2_to_mid())); Registration::Warp::compose_linear_deformation (nl_registration.get_im2_to_mid_linear(), *(nl_registration.get_im2_to_mid()), im2_deform_field); - auto im2_midway = Image::create (im2_midway_transformed_path, midway_header).with_direct_io(); - Filter::warp (im2_image, im2_midway, im2_deform_field, 0.0); - if (do_reorientation) - Registration::Transform::reorient_warp ("reorienting FODs", im2_midway, im2_deform_field, - Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); + } - } else if (do_affine) { - affine_registration.write_transformed_images (im1_image, im2_image, affine, im1_midway_transformed_path, im2_midway_transformed_path, do_reorientation); - } else { - rigid_registration.write_transformed_images (im1_image, im2_image, rigid, im1_midway_transformed_path, im2_midway_transformed_path, do_reorientation); + for (size_t idx = 0; idx < input2_midway_transformed_paths.size(); idx++) { + CONSOLE ("... " + input2_midway_transformed_paths[idx]); + { + // LogLevelLatch log_level (0); + Image im2_image = Image::open (input2[idx].name()); + midway_header.ndim() = im2_image.ndim(); + if (midway_header.ndim() == 4) + midway_header.size(3) = im2_image.size(3); + + const size_t nvols = im2_image.ndim() == 3 ? 
1 : im2_image.size(3); + const value_type val = (std::sqrt (float (1 + 8 * nvols)) - 3.0) / 4.0; + const bool reorient_output = !reorientation_forbidden && (nvols > 1) && !(val - (int)val); + + if (do_nonlinear) { + auto im2_midway = Image::create (input2_midway_transformed_paths[idx], midway_header); + Filter::warp (im2_image, im2_midway, im2_deform_field, out_of_bounds_value); + if (reorient_output) + Registration::Transform::reorient_warp ("reorienting ODFs", im2_midway, im2_deform_field, + Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); + } else if (do_affine) { + auto im2_midway = Image::create (input2_midway_transformed_paths[idx], midway_header); + Filter::reslice (im2_image, im2_midway, affine.get_transform_half_inverse(), Adapter::AutoOverSample, out_of_bounds_value); + if (reorient_output) + Registration::Transform::reorient ("reorienting ODFs", im2_midway, im2_midway, affine.get_transform_half_inverse(), Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); + } else { // rigid + auto im2_midway = Image::create (input2_midway_transformed_paths[idx], midway_header); + Filter::reslice (im2_image, im2_midway, rigid.get_transform_half_inverse(), Adapter::AutoOverSample, out_of_bounds_value); + if (reorient_output) + Registration::Transform::reorient ("reorienting ODFs", im2_midway, im2_midway, rigid.get_transform_half_inverse(), Math::Sphere::spherical2cartesian (DWI::Directions::electrostatic_repulsion_300()).transpose()); + } + } } } diff --git a/cmd/mrresize.cpp b/cmd/mrresize.cpp deleted file mode 100644 index 9ce3e313de..0000000000 --- a/cmd/mrresize.cpp +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ - * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * For more details, see http://www.mrtrix.org/ - */ - - -#include "command.h" -#include "image.h" -#include "filter/resize.h" -#include "progressbar.h" - - -using namespace MR; -using namespace App; - -const char* interp_choices[] = { "nearest", "linear", "cubic", "sinc", NULL }; - -void usage () -{ - AUTHOR = "David Raffelt (david.raffelt@florey.edu.au)"; - - SYNOPSIS = "Resize an image by defining the new image resolution, voxel size or a scale factor"; - - DESCRIPTION - + "Note that if the image is 4D, then only the first 3 dimensions can be resized." - + "Also note that if the image is down-sampled, the appropriate smoothing is automatically applied using Gaussian smoothing."; - - ARGUMENTS - + Argument ("input", "input image to be resized.").type_image_in () - + Argument ("output", "the output image.").type_image_out (); - - OPTIONS - + Option ("size", "define the new image size for the output image. " - "This should be specified as a comma-separated list.") - + Argument ("dims").type_sequence_int() - - + Option ("voxel", "define the new voxel size for the output image. " - "This can be specified either as a single value to be used for all dimensions, " - "or as a comma-separated list of the size for each voxel dimension.") - + Argument ("size").type_sequence_float() - - + Option ("scale", "scale the image resolution by the supplied factor. 
" - "This can be specified either as a single value to be used for all dimensions, " - "or as a comma-separated list of scale factors for each dimension.") - + Argument ("factor").type_sequence_float() - - + Option ("interp", - "set the interpolation method to use when resizing (choices: nearest, linear, cubic, sinc. Default: cubic).") - + Argument ("method").type_choice (interp_choices) - - + DataType::options(); -} - - -void run () { - - - auto input = Image::open (argument[0]); - - Filter::Resize resize_filter (input); - - size_t resize_option_count = 0; - - vector scale; - auto opt = get_options ("scale"); - if (opt.size()) { - scale = parse_floats (opt[0][0]); - if (scale.size() == 1) - scale.resize (3, scale[0]); - resize_filter.set_scale_factor (scale); - ++resize_option_count; - } - - vector voxel_size; - opt = get_options ("voxel"); - if (opt.size()) { - voxel_size = parse_floats (opt[0][0]); - if (voxel_size.size() == 1) - voxel_size.resize (3, voxel_size[0]); - resize_filter.set_voxel_size (voxel_size); - ++resize_option_count; - } - - vector image_size; - opt = get_options ("size"); - if (opt.size()) { - image_size = parse_ints(opt[0][0]); - resize_filter.set_size (image_size); - ++resize_option_count; - } - - - int interp = 2; - opt = get_options ("interp"); - if (opt.size()) { - interp = opt[0][0]; - resize_filter.set_interp_type (interp); - } - - if (!resize_option_count) - throw Exception ("please use either the -scale, -voxel, or -resolution option to resize the image"); - if (resize_option_count != 1) - throw Exception ("only a single method can be used to resize the image (image resolution, voxel size or scale factor)"); - - Header header (resize_filter); - header.datatype() = DataType::from_command_line (DataType::from ()); - auto output = Image::create (argument[1], header); - - resize_filter (input, output); -} diff --git a/cmd/mrstats.cpp b/cmd/mrstats.cpp index 514cbfc59e..6ffbb78434 100644 --- a/cmd/mrstats.cpp +++ b/cmd/mrstats.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "command.h" diff --git a/cmd/mrthreshold.cpp b/cmd/mrthreshold.cpp index ea73158c17..502191f8b6 100644 --- a/cmd/mrthreshold.cpp +++ b/cmd/mrthreshold.cpp @@ -1,27 +1,26 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - -#include - #include "command.h" +#include "exception.h" #include "image.h" #include "image_helpers.h" -#include "memory.h" -#include "progressbar.h" -#include "types.h" +#include "adapter/replicate.h" +#include "adapter/subset.h" #include "algo/loop.h" #include "filter/optimal_threshold.h" @@ -29,206 +28,462 @@ using namespace MR; using namespace App; + +enum class operator_type { LT, LE, GE, GT, UNDEFINED }; +const char* const operator_list[] = { "lt", "le", "ge", "gt", nullptr }; + + void usage () { - AUTHOR = "J-Donald Tournier (jdtournier@gmail.com)"; + AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au) and J-Donald Tournier (jdtournier@gmail.com)"; SYNOPSIS = "Create bitwise image by thresholding image intensity"; DESCRIPTION - + "By default, an optimal threshold is determined using a parameter-free method. " - "Alternatively the threshold can be defined manually by the user."; - - REFERENCES - + "* If not using any manual thresholding option:\n" + + "The threshold value to be applied can be determined in one of a number of ways:" + + + "- If no relevant command-line option is used, the command will automatically " + "determine an optimal threshold;" + + + "- The -abs option provides the threshold value explicitly;" + + + "- The -percentile, -top and -bottom options enable more fine-grained control " + "over how the threshold value is determined." + + + "The -mask option only influences those image values that contribute " + "toward the determination of the threshold value; once the threshold is determined, " + "it is applied to the entire image, irrespective of use of the -mask option. If you " + "wish for the voxels outside of the specified mask to additionally be excluded from " + "the output mask, this can be achieved by providing the -out_masked option." + + + "The four operators available through the \"-comparison\" option (\"lt\", \"le\", \"ge\" and \"gt\") " + "correspond to \"less-than\" (<), \"less-than-or-equal\" (<=), \"greater-than-or-equal\" (>=) " + "and \"greater-than\" (>). This offers fine-grained control over how the thresholding " + "operation will behave in the presence of values equivalent to the threshold. " + "By default, the command will select voxels with values greater than or equal to the " + "determined threshold (\"ge\"); unless the -bottom option is used, in which case " + "after a threshold is determined from the relevant lowest-valued image voxels, those " + "voxels with values less than or equal to that threshold (\"le\") are selected. 
" + "This provides more fine-grained control than the -invert option; the latter " + "is provided for backwards compatibility, but is equivalent to selection of the " + "opposite comparison within this selection." + + + "If no output image path is specified, the command will instead write to " + "standard output the determined threshold value."; + + REFERENCES + + "* If not using any explicit thresholding mechanism: \n" "Ridgway, G. R.; Omar, R.; Ourselin, S.; Hill, D. L.; Warren, J. D. & Fox, N. C. " "Issues with threshold masking in voxel-based morphometry of atrophied brains. " "NeuroImage, 2009, 44, 99-111"; ARGUMENTS - + Argument ("input", "the input image to be thresholded.").type_image_in () - + Argument ("output", "the output binary image mask.").type_image_out (); + + Argument ("input", "the input image to be thresholded").type_image_in() + + Argument ("output", "the (optional) output binary image mask").optional().type_image_out(); OPTIONS - + Option ("abs", "specify threshold value as absolute intensity.") - + Argument ("value").type_float() - + Option ("percentile", "threshold the image at the ith percentile.") - + Argument ("value").type_float (0.0, 100.0) + + OptionGroup ("Threshold determination mechanisms") + + + Option ("abs", "specify threshold value as absolute intensity") + + Argument ("value").type_float() + + + Option ("percentile", "determine threshold based on some percentile of the image intensity distribution") + + Argument ("value").type_float (0.0, 100.0) + + + Option ("top", "determine threshold that will result in selection of some number of top-valued voxels") + + Argument ("count").type_integer (1) + + + Option ("bottom", "determine & apply threshold resulting in selection of some number of bottom-valued voxels " + "(note: implies threshold application operator of \"le\" unless otherwise specified)") + + Argument ("count").type_integer (1) + + - + Option ("top", "provide a mask of the N top-valued voxels") - + Argument ("N").type_integer (0) + + OptionGroup ("Threshold determination modifiers") - + Option ("bottom", "provide a mask of the N bottom-valued voxels") - + Argument ("N").type_integer (0) + + Option ("allvolumes", "compute a single threshold for all image volumes, rather than an individual threshold per volume") - + Option ("invert", "invert output binary mask.") + + Option ("ignorezero", "ignore zero-valued input values during threshold determination") - + Option ("toppercent", "provide a mask of the N%% top-valued voxels") - + Argument ("N").type_float (0.0, 100.0) + + Option ("mask", "compute the threshold based only on values within an input mask image") + + Argument ("image").type_image_in () - + Option ("bottompercent", "provide a mask of the N%% bottom-valued voxels") - + Argument ("N").type_float (0.0, 100.0) - + Option ("nan", "use NaN as the output zero value.") - + Option ("ignorezero", "ignore zero-valued input voxels.") + + OptionGroup ("Threshold application modifiers") + + + Option ("comparison", "comparison operator to use when applying the threshold; " + "options are: " + join(operator_list, ",") + " (default = \"le\" for -bottom; \"ge\" otherwise)") + + Argument ("choice").type_choice (operator_list) + + + Option ("invert", "invert the output binary mask " + "(equivalent to flipping the operator; provided for backwards compatibility)") + + + Option ("out_masked", "mask the output image based on the provided input mask image") + + + Option ("nan", "set voxels that fail the threshold to NaN rather than zero " + "(output image will 
be floating-point rather than binary)"); - + Option ("mask", "compute the optimal threshold based on voxels within a mask.") - + Argument ("image").type_image_in (); } -void run () -{ - default_type threshold_value (NaN), percentile (NaN), bottomNpercent (NaN), topNpercent (NaN); - size_t topN (0), bottomN (0), nopt (0); +using value_type = float; + + + +bool issue_degeneracy_warning = false; + + - auto opt = get_options ("abs"); +Image get_mask (const Image& in) +{ + Image mask; + auto opt = get_options ("mask"); if (opt.size()) { - threshold_value = opt[0][0]; - ++nopt; + mask = Image::open (opt[0][0]); + check_dimensions (in, mask, 0, 3); + for (size_t axis = 3; axis != mask.ndim(); ++axis) { + if (mask.size (axis) > 1 && axis < in.ndim() && mask.size (axis) != in.size (axis)) + throw Exception ("Dimensions of mask image do not match those of main image"); + } } + return mask; +} - opt = get_options ("percentile"); - if (opt.size()) { - percentile = opt[0][0]; - ++nopt; + +vector get_data (Image& in, + Image& mask, + const size_t max_axis, + const bool ignore_zero) +{ + vector data; + data.reserve (voxel_count (in, 0, max_axis)); + if (mask.valid()) { + Adapter::Replicate> mask_replicate (mask, in); + if (ignore_zero) { + for (auto l = Loop(in, 0, max_axis) (in, mask_replicate); l; ++l) { + if (mask_replicate.value() && !std::isnan (static_cast(in.value())) && in.value() != 0.0f) + data.push_back (in.value()); + } + } else { + for (auto l = Loop(in, 0, max_axis) (in, mask_replicate); l; ++l) { + if (mask_replicate.value() && !std::isnan (static_cast(in.value()))) + data.push_back (in.value()); + } + } + } else { + if (ignore_zero) { + for (auto l = Loop(in, 0, max_axis) (in); l; ++l) { + if (!std::isnan (static_cast(in.value())) && in.value() != 0.0f) + data.push_back (in.value()); + } + } else { + for (auto l = Loop(in, 0, max_axis) (in); l; ++l) { + if (!std::isnan (static_cast(in.value()))) + data.push_back (in.value()); + } + } } + if (!data.size()) + throw Exception ("No valid input data found; unable to determine threshold"); + return data; +} + + + +default_type calculate (Image& in, + Image& mask, + const size_t max_axis, + const default_type abs, + const default_type percentile, + const ssize_t bottom, + const ssize_t top, + const bool ignore_zero, + const bool to_cout) +{ + if (std::isfinite (abs)) { + + return abs; + + } else if (std::isfinite (percentile)) { + + auto data = get_data (in, mask, max_axis, ignore_zero); + if (percentile == 100.0) { + return default_type(*std::max_element (data.begin(), data.end())); + } else if (percentile == 0.0) { + return default_type(*std::min_element (data.begin(), data.end())); + } else { + const default_type interp_index = 0.01 * percentile * (data.size()-1); + const size_t lower_index = size_t(std::floor (interp_index)); + const default_type mu = interp_index - default_type(lower_index); + std::nth_element (data.begin(), data.begin() + lower_index, data.end()); + const default_type lower_value = default_type(data[lower_index]); + std::nth_element (data.begin(), data.begin() + lower_index + 1, data.end()); + const default_type upper_value = default_type(data[lower_index + 1]); + return (1.0-mu)*lower_value + mu*upper_value; + } + + } else if (std::max (bottom, top) >= 0) { + + auto data = get_data (in, mask, max_axis, ignore_zero); + const ssize_t index (bottom >= 0 ? 
+ size_t(bottom) - 1 : + (ssize_t(data.size()) - ssize_t(top))); + if (index < 0 || index >= ssize_t(data.size())) + throw Exception ("Number of valid input image values (" + str(data.size()) + ") less than number of voxels requested via -" + (bottom >= 0 ? "bottom" : "top") + " option (" + str(bottom >= 0 ? bottom : top) + ")"); + std::nth_element (data.begin(), data.begin() + index, data.end()); + const value_type threshold_float = data[index]; + if (index) { + std::nth_element (data.begin(), data.begin() + index - 1, data.end()); + if (data[index-1] == threshold_float) + issue_degeneracy_warning = true; + } + if (index < ssize_t(data.size()) - 1) { + std::nth_element (data.begin(), data.begin() + index + 1, data.end()); + if (data[index+1] == threshold_float) + issue_degeneracy_warning = true; + } + return default_type(threshold_float); + + } else { // No explicit mechanism option: do automatic thresholding + + std::unique_ptr latch; + if (to_cout) + latch.reset (new LogLevelLatch (App::log_level - 1)); + if (max_axis < in.ndim()) { + + // Need to extract just the current 3D volume + vector in_from (in.ndim()), in_size (in.ndim()); + size_t axis; + for (axis = 0; axis != 3; ++axis) { + in_from[axis] = 0; + in_size[axis] = in.size (axis); + } + for (; axis != in.ndim(); ++axis) { + in_from[axis] = in.index (axis); + in_size[axis] = 1; + } + Adapter::Subset> in_subset (in, in_from, in_size); + if (mask.valid()) { + vector mask_from (mask.ndim()), mask_size (mask.ndim()); + for (axis = 0; axis != 3; ++axis) { + mask_from[axis] = 0; + mask_size[axis] = mask.size (axis); + } + for (; axis != mask.ndim(); ++axis) { + mask_from[axis] = mask.index (axis); + mask_size[axis] = 1; + } + Adapter::Subset> mask_subset (mask, mask_from, mask_size); + Adapter::Replicate mask_replicate (mask_subset, in_subset); + return Filter::estimate_optimal_threshold (in_subset, mask_replicate); + } else { + return Filter::estimate_optimal_threshold (in_subset); + } + + } else if (mask.valid()) { + Adapter::Replicate> mask_replicate (mask, in); + return Filter::estimate_optimal_threshold (in, mask_replicate); + } else { + return Filter::estimate_optimal_threshold (in); + } - opt = get_options ("top"); - if (opt.size()) { - topN = opt[0][0]; - ++nopt; } +} - opt = get_options ("bottom"); - if (opt.size()) { - bottomN = opt[0][0]; - ++nopt; + + +template +void apply (Image& in, + Image& mask, + Image& out, + const size_t max_axis, + const value_type threshold, + const operator_type comp, + const bool mask_out, + const bool to_cout) +{ + if (to_cout) { + std::cout << threshold; + return; } - opt = get_options ("toppercent"); - if (opt.size()) { - topNpercent = opt[0][0]; - ++nopt; + const T true_value = std::is_floating_point::value ? 1.0 : true; + const T false_value = std::is_floating_point::value ? 
NaN : false; + + std::function func; + switch (comp) { + case operator_type::LT: func = [] (const value_type in, const value_type ref) { return in < ref; }; break; + case operator_type::LE: func = [] (const value_type in, const value_type ref) { return in <= ref; }; break; + case operator_type::GE: func = [] (const value_type in, const value_type ref) { return in >= ref; }; break; + case operator_type::GT: func = [] (const value_type in, const value_type ref) { return in > ref; }; break; + case operator_type::UNDEFINED: assert (0); } - opt = get_options ("bottompercent"); - if (opt.size()) { - bottomNpercent = opt[0][0]; - ++nopt; + if (mask_out) { + assert (mask.valid()); + for (auto l = Loop(in, 0, max_axis) (in, mask, out); l; ++l) + out.value() = !std::isnan (static_cast(in.value())) && mask.value() && func (in.value(), threshold) ? true_value : false_value; + } else { + for (auto l = Loop(in, 0, max_axis) (in, out); l; ++l) + out.value() = !std::isnan (static_cast(in.value())) && func (in.value(), threshold) ? true_value : false_value; } +} - if (nopt > 1) - throw Exception ("too many conflicting options"); - bool invert = get_options ("invert").size(); - const bool use_NaN = get_options ("nan").size(); - const bool ignore_zeroes = get_options ("ignorezero").size(); - auto header = Header::open (argument[0]); - if (header.datatype().is_complex()) - throw Exception ("Cannot perform thresholding on complex images"); - auto in = header.get_image(); - if (voxel_count (in) < topN || voxel_count (in) < bottomN) - throw Exception ("number of voxels at which to threshold exceeds number of voxels in image"); - if (std::isfinite (percentile)) { - percentile /= 100.0; - if (percentile < 0.5) { - bottomN = std::round (voxel_count (in) * percentile); - invert = !invert; +template +void execute (Image& in, + Image& mask, + const std::string& out_path, + const default_type abs, + const default_type percentile, + const ssize_t bottom, + const ssize_t top, + const bool ignore_zero, + const bool all_volumes, + const operator_type op, + const bool mask_out) +{ + const bool to_cout = out_path.empty(); + Image out; + if (!to_cout) { + Header header_out (in); + header_out.datatype() = DataType::from(); + header_out.datatype().set_byte_order_native(); + out = Image::create (out_path, header_out); + } + + // Branch based on whether or not we need to process each image volume individually + if (in.ndim() > 3 && !all_volumes) { + + // Do one volume at a time + // If writing to cout, also add a newline between each volume + bool is_first_loop = true; + for (auto l = Loop("Determining and applying per-volume thresholds", 3, in.ndim()) (in); l; ++l) { + if (to_cout) { + if (is_first_loop) + is_first_loop = false; + else + std::cout << "\n"; + } + LogLevelLatch latch (App::log_level - 1); + const default_type threshold = calculate (in, mask, 3, abs, percentile, bottom, top, ignore_zero, to_cout); + if (out.valid()) + assign_pos_of (in, 3).to (out); + apply (in, mask, out, 3, value_type(threshold), op, mask_out, to_cout); } - else topN = std::round (voxel_count (in) * (1.0 - percentile)); + + return; + + } else if (in.ndim() <= 3 && all_volumes) { + WARN ("Option -allvolumes ignored; input image is less than 4D"); } - header.datatype() = use_NaN ? 
DataType::Float32 : DataType::Bit; + // Process whole input image as a single block + const default_type threshold = calculate (in, mask, in.ndim(), abs, percentile, bottom, top, ignore_zero, to_cout); + apply (in, mask, out, in.ndim(), value_type(threshold), op, mask_out, to_cout); + +} + + - auto out = Image::create (argument[1], header); - float zero = use_NaN ? NaN : 0.0; - float one = 1.0; - if (invert) std::swap (zero, one); - if (std::isfinite (topNpercent) || std::isfinite (bottomNpercent)) { - size_t count = 0; - for (auto l = Loop("computing voxel count", in) (in); l; ++l) { - if (ignore_zeroes && in.value() == 0.0) continue; - ++count; +void run () +{ + const default_type abs = get_option_value ("abs", NaN); + const default_type percentile = get_option_value ("percentile", NaN); + const ssize_t bottom = get_option_value ("bottom", -1); + const ssize_t top = get_option_value ("top", -1); + const size_t num_explicit_mechanisms = (std::isfinite (abs) ? 1 : 0) + + (std::isfinite (percentile) ? 1 : 0) + + (bottom >= 0 ? 1 : 0) + + (top >= 0 ? 1 : 0); + if (num_explicit_mechanisms > 1) + throw Exception ("Cannot specify more than one mechanism for threshold selection"); + + auto header_in = Header::open (argument[0]); + if (header_in.datatype().is_complex()) + throw Exception ("Cannot perform thresholding directly on complex image data"); + auto in = header_in.get_image(); + + const bool to_cout = argument.size() == 1; + const std::string output_path = to_cout ? std::string("") : argument[1]; + const bool all_volumes = get_options("allvolumes").size(); + const bool ignore_zero = get_options("ignorezero").size(); + const bool use_nan = get_options ("nan").size(); + const bool invert = get_options ("invert").size(); + + bool mask_out = get_options ("out_masked").size(); + + auto opt = get_options ("comparison"); + operator_type comp = opt.size() ? + operator_type(int(opt[0][0])) : + (bottom >= 0 ? operator_type::LE : operator_type::GE); + if (invert) { + switch (comp) { + case operator_type::LT: comp = operator_type::GE; break; + case operator_type::LE: comp = operator_type::GT; break; + case operator_type::GE: comp = operator_type::LT; break; + case operator_type::GT: comp = operator_type::LE; break; + case operator_type::UNDEFINED: assert (0); } - if (std::isfinite (topNpercent)) - topN = std::round (0.01 * topNpercent * count); - else - bottomN = std::round (0.01 * bottomNpercent * count); } - if (topN || bottomN) { - std::multimap > list; - - { - const std::string msg = "thresholding \"" + shorten (in.name()) + "\" at " + ( - std::isnan (percentile) ? - (str (topN ? topN : bottomN) + "th " + (topN ? 
"top" : "bottom") + " voxel") : - (str (percentile*100.0) + "\% percentile")); - - if (topN) { - for (auto l = Loop(in) (in); l; ++l) { - const float val = in.value(); - if (!std::isfinite (val)) continue; - if (ignore_zeroes && val == 0.0) continue; - if (list.size() == topN) { - if (val < list.begin()->first) continue; - list.erase (list.begin()); - } - vector pos (in.ndim()); - for (size_t n = 0; n < in.ndim(); ++n) - pos[n] = in.index(n); - list.insert (std::pair > (val, pos)); - } + if (to_cout) { + if (use_nan) { + WARN ("Option -nan ignored: has no influence when no output image is specified"); + } + if (opt.size()) { + WARN ("Option -comparison ignored: has no influence when no output image is specified"); + comp = operator_type::UNDEFINED; + } + if (invert) { + WARN ("Option -invert ignored: has no influence when no output image is specified"); + } + if (mask_out) { + WARN ("Option -out_masked ignored: has no influence when no output image is specified"); + } + } + + Image mask; + if (std::isfinite (abs)) { + if (ignore_zero) { + WARN ("-ignorezero option has no effect if combined with -abs option"); + } + if (get_options ("mask").size() && !mask_out) { + WARN ("-mask option has no effect if combined with -abs option and -out_masked is not used"); + } + } else { + mask = get_mask (in); + if (!mask.valid() && mask_out) { + WARN ("-out_masked option ignored; no mask image provided via -mask"); + mask_out = false; + } + if (!num_explicit_mechanisms) { + if (ignore_zero) { + WARN ("Option -ignorezero ignored by automatic threshold calculation"); } - else { - for (auto l = Loop(in) (in); l; ++l) { - const float val = in.value(); - if (!std::isfinite (val)) continue; - if (ignore_zeroes && val == 0.0) continue; - if (list.size() == bottomN) { - std::multimap >::iterator i = list.end(); - --i; - if (val > i->first) continue; - list.erase (i); - } - vector pos (in.ndim()); - for (size_t n = 0; n < in.ndim(); ++n) - pos[n] = in.index(n); - list.insert (std::pair > (val, pos)); - } + try { + check_3D_nonunity (in); + } catch (Exception& e) { + throw Exception (e, "Automatic thresholding can only be performed for voxel data"); } } + } - for (auto l = Loop(out) (out); l; ++l) - out.value() = zero; + if (use_nan) + execute (in, mask, output_path, abs, percentile, bottom, top, ignore_zero, all_volumes, comp, mask_out); + else + execute (in, mask, output_path, abs, percentile, bottom, top, ignore_zero, all_volumes, comp, mask_out); - for (std::multimap >::const_iterator i = list.begin(); i != list.end(); ++i) { - for (size_t n = 0; n < out.ndim(); ++n) - out.index(n) = i->second[n]; - out.value() = one; - } - } - else { - Image mask; - opt = get_options ("mask"); - if (opt.size()) - mask = Image::open (opt[0][0]); - if (std::isnan (threshold_value)) - threshold_value = Filter::estimate_optimal_threshold (in, mask); - - const std::string msg = "thresholding \"" + shorten (in.name()) + "\" at intensity " + str (threshold_value); - for (auto l = Loop(msg, in) (in, out); l; ++l) { - const float val = in.value(); - out.value() = ( !std::isfinite (val) || val < threshold_value ) ? zero : one; - } + if (issue_degeneracy_warning) { + WARN ("Duplicate image values surrounding threshold; " + "exact number of voxels influenced by numerical threshold may not match requested number"); } } diff --git a/cmd/mrtransform.cpp b/cmd/mrtransform.cpp index 98f75bed8f..f3d0f2a941 100644 --- a/cmd/mrtransform.cpp +++ b/cmd/mrtransform.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. 
+/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "image.h" @@ -34,6 +35,8 @@ #include "registration/warp/compose.h" #include "math/average_space.h" #include "math/SH.h" +#include "adapter/jacobian.h" +#include "file/nifti_utils.h" @@ -41,6 +44,7 @@ using namespace MR; using namespace App; const char* interp_choices[] = { "nearest", "linear", "cubic", "sinc", nullptr }; +const char* modulation_choices[] = { "fod", "jac", nullptr }; void usage () { @@ -53,15 +57,21 @@ void usage () + "If a linear transform is applied without a template image the command " "will modify the image header transform matrix" - + "FOD reorientation (with apodised point spread functions) will be performed " - "by default if the number of volumes in the 4th dimension equals the number " + + "FOD reorientation (with apodised point spread functions) can be performed " + "if the number of volumes in the 4th dimension equals the number " "of coefficients in an antipodally symmetric spherical harmonic series (e.g. " - "6, 15, 28 etc). The -no_reorientation option can be used to force " - "reorientation off if required." + "6, 15, 28 etc). For such data, the -reorient_fod yes/no option must be used to specify " + "if reorientation is required." + + + "The output image intensity can be modulated using the (local or global) volume change " + "if a linear or nonlinear transformation is applied. 'FOD' modulation preserves the " + "apparent fibre density across the fibre bundle width and can only be applied if FOD reorientation " + "is used. Alternatively, non-directional scaling by the Jacobian determinant can be applied to " + "any image type. " + "If a DW scheme is contained in the header (or specified separately), and " "the number of directions matches the number of volumes in the images, any " - "transformation applied using the -linear option will be also be applied to the directions." + "transformation applied using the -linear option will also be applied to the directions." + "When the -template option is used to specify the target image grid, the " "image provided via this option will not influence the axis data strides " @@ -77,7 +87,7 @@ void usage () + "* If FOD modulation is being performed:\n" "Raffelt, D.; Tournier, J.-D.; Rose, S.; Ridgway, G.R.; Henderson, R.; Crozier, S.; Salvado, O.; Connelly, A.; " // Internal "Apparent Fibre Density: a novel measure for the analysis of diffusion-weighted magnetic resonance images. 
" - "NeuroImage, 2012, 15;59(4), 3976-94."; + "NeuroImage, 2012, 15;59(4), 3976-94"; ARGUMENTS + Argument ("input", "input image to be transformed.").type_image_in () @@ -164,20 +174,27 @@ void usage () + OptionGroup ("Fibre orientation distribution handling options") + Option ("modulate", - "modulate FODs during reorientation to preserve the apparent fibre density across fibre bundle widths before and after the transformation") + "Valid choices are: fod and jac. \n" + "fod: modulate FODs during reorientation to preserve the apparent fibre density across fibre bundle widths before and after the transformation. \n" + "jac: modulate the image intensity with the determinant of the Jacobian of the warp of linear transformation " + "to preserve the total intensity before and after the transformation.") + + Argument ("intensity modulation method").type_choice (modulation_choices) + Option ("directions", "directions defining the number and orientation of the apodised point spread functions used in FOD reorientation " "(Default: 300 directions)") + Argument ("file", "a list of directions [az el] generated using the dirgen command.").type_file_in() - + Option ("noreorientation", - "turn off FOD reorientation. Reorientation is on by default if the number " + + Option ("reorient_fod", + "specify whether to perform FOD reorientation. This is required if the number " "of volumes in the 4th dimension corresponds to the number of coefficients in an " - "antipodally symmetric spherical harmonic series (i.e. 6, 15, 28, 45, 66 etc") + "antipodally symmetric spherical harmonic series with lmax >= 2 (i.e. 6, 15, 28, 45, 66 volumes.") + + Argument ("boolean").type_bool() + DWI::GradImportOptions() + + DWI::GradExportOptions() + + DataType::options () + Stride::Options @@ -185,23 +202,25 @@ void usage () + OptionGroup ("Additional generic options for mrtransform") + Option ("nan", - "Use NaN as the out of bounds value (Default: 0.0)"); + "Use NaN as the out of bounds value (Default: 0.0)") + + + Option ("no_reorientation", "deprecated, use -reorient_fod instead"); } void apply_warp (Image& input, Image& output, Image& warp, - const int interp, const float out_of_bounds_value, const vector& oversample) { + const int interp, const float out_of_bounds_value, const vector& oversample, const bool jacobian_modulate = false) { switch (interp) { case 0: - Filter::warp (input, output, warp, out_of_bounds_value, oversample); + Filter::warp (input, output, warp, out_of_bounds_value, oversample, jacobian_modulate); break; case 1: - Filter::warp (input, output, warp, out_of_bounds_value, oversample); + Filter::warp (input, output, warp, out_of_bounds_value, oversample, jacobian_modulate); break; case 2: - Filter::warp (input, output, warp, out_of_bounds_value, oversample); + Filter::warp (input, output, warp, out_of_bounds_value, oversample, jacobian_modulate); break; case 3: - Filter::warp (input, output, warp, out_of_bounds_value, oversample); + Filter::warp (input, output, warp, out_of_bounds_value, oversample, jacobian_modulate); break; default: assert (0); @@ -209,6 +228,14 @@ void apply_warp (Image& input, Image& output, Image& } } +void apply_linear_jacobian (Image & image, transform_type trafo) { + const float det = trafo.linear().topLeftCorner<3,3>().determinant(); + INFO("global intensity modulation with scale factor " + str(det)); + for (auto i = Loop ("applying global intensity modulation", image, 0, image.ndim()) (image); i; ++i) { + image.value() *= det; + } +} + void run () @@ -271,6 +298,11 @@ void run () 
opt = get_options ("warp_full"); Image warp; if (opt.size()) { + if (!Path::is_mrtrix_image (opt[0][0]) && !(Path::has_suffix (opt[0][0], {".nii", ".nii.gz"}) && + File::Config::get_bool ("NIfTIAutoLoadJSON", false) && + Path::exists(File::NIfTI::get_json_path(opt[0][0])))) + WARN ("warp_full image is not in original .mif/.mih file format or in NIfTI file format with associated JSON. " + "Converting to other file formats may remove linear transformations stored in the image header."); warp = Image::open (opt[0][0]).with_direct_io(); Registration::Warp::check_warp_full (warp); if (linear) @@ -349,15 +381,25 @@ void run () Stride::List stride = Stride::get (input_header); - // Detect FOD image for reorientation - opt = get_options ("noreorientation"); - bool fod_reorientation = false; + // Detect FOD image + bool is_possible_fod_image = input_header.ndim() == 4 && input_header.size(3) >= 6 && + input_header.size(3) == (int) Math::SH::NforL (Math::SH::LforN (input_header.size(3))); + + + // reorientation + if (get_options ("no_reorientation").size()) + throw Exception ("The -no_reorientation option is deprecated. Use -reorient_fod no instead."); + opt = get_options ("reorient_fod"); + bool fod_reorientation = opt.size() && bool(opt[0][0]); + if (is_possible_fod_image && !opt.size()) + throw Exception("-reorient_fod yes/no needs to be explicitly specified for images with " + + str(input_header.size(3)) + " volumes"); + else if (!is_possible_fod_image && fod_reorientation) + throw Exception("Apodised PSF reorientation requires SH series images"); + Eigen::MatrixXd directions_cartesian; - if (!opt.size() && (linear || warp.valid() || template_header.valid()) && input_header.ndim() == 4 && - input_header.size(3) >= 6 && - input_header.size(3) == (int) Math::SH::NforL (Math::SH::LforN (input_header.size(3)))) { - CONSOLE ("SH series detected, performing apodised PSF reorientation"); - fod_reorientation = true; + if (fod_reorientation && (linear || warp.valid() || template_header.valid()) && is_possible_fod_image) { + CONSOLE ("performing apodised PSF reorientation"); Eigen::MatrixXd directions_az_el; opt = get_options ("directions"); @@ -371,12 +413,35 @@ void run () stride = Stride::contiguous_along_axis (3, input_header); } - // Modulate FODs - bool modulate = false; - if (get_options ("modulate").size()) { - modulate = true; + // Intensity / FOD modulation + opt = get_options ("modulate"); + bool modulate_fod = opt.size() && (int) opt[0][0] == 0; + bool modulate_jac = opt.size() && (int) opt[0][0] == 1; + + const std::string reorient_msg = str("reorienting") + str((modulate_fod ? 
" with FOD modulation" : "")); + if (modulate_fod) + add_line (output_header.keyval()["comments"], std::string ("FOD modulation applied")); + if (modulate_jac) + add_line (output_header.keyval()["comments"], std::string ("Jacobian determinant modulation applied")); + + if (modulate_fod) { + if (!is_possible_fod_image) + throw Exception ("FOD modulation can only be performed with SH series image"); if (!fod_reorientation) - throw Exception ("modulation can only be performed with FOD reorientation"); + throw Exception ("FOD modulation can only be performed with FOD reorientation"); + } + + if (modulate_jac) { + if (fod_reorientation) { + WARN ("Input image being interpreted as FOD data (user requested FOD reorientation); " + "FOD-based modulation would be more appropriate for such data than the requested Jacobian modulation."); + } else if (is_possible_fod_image) { + WARN ("Jacobian modulation performed on possible SH series image. Did you mean FOD modulation?"); + } + + if (!linear && !warp.valid()) + throw Exception ("Jacobian modulation requires linear or nonlinear transformation"); + } @@ -475,8 +540,7 @@ void run () for (const auto x : oversample) if (x < 1) throw Exception ("-oversample factors must be positive integers"); - } - else if (interp == 0) + } else if (interp == 0) // default for nearest-neighbour is no oversampling oversample = { 1, 1, 1 }; @@ -499,13 +563,11 @@ void run () if (get_options ("midway_space").size()) { INFO("regridding to midway space"); - vector
headers; - headers.push_back(input_header); - headers.push_back(template_header); - vector> void_trafo; - auto padding = Eigen::Matrix(1.0, 1.0, 1.0, 1.0); - int subsampling = 1; - auto midway_header = compute_minimum_average_header (headers, subsampling, padding, void_trafo); + if (!half) + WARN ("regridding to midway_space assumes the linear transformation to be a transformation from input to midway space. Use -half if the input transformation is a full transformation."); + transform_type linear_transform_inverse; + linear_transform_inverse.matrix() = linear_transform.inverse().matrix(); + auto midway_header = compute_minimum_average_header (input_header, template_header, linear_transform_inverse, linear_transform); for (size_t i = 0; i < 3; ++i) { output_header.size(i) = midway_header.size(i); output_header.spacing(i) = midway_header.spacing(i); @@ -536,7 +598,10 @@ void run () } if (fod_reorientation) - Registration::Transform::reorient ("reorienting", output, output, linear_transform, directions_cartesian.transpose(), modulate); + Registration::Transform::reorient (reorient_msg.c_str(), output, output, linear_transform, directions_cartesian.transpose(), modulate_fod); + + if (modulate_jac) + apply_linear_jacobian (output, linear_transform); } else if (warp.valid()) { @@ -564,25 +629,30 @@ void run () } else { warp_deform = Registration::Warp::compute_full_deformation (warp, template_header, from); } - apply_warp (input, output, warp_deform, interp, out_of_bounds_value, oversample); + apply_warp (input, output, warp_deform, interp, out_of_bounds_value, oversample, modulate_jac); if (fod_reorientation) - Registration::Transform::reorient_warp ("reorienting", output, warp_deform, directions_cartesian.transpose(), modulate); + Registration::Transform::reorient_warp (reorient_msg.c_str(), + output, warp_deform, directions_cartesian.transpose(), modulate_fod); // Compose and apply input linear and 4D deformation field } else if (warp.ndim() == 4 && linear) { auto warp_composed = Image::scratch (warp); Registration::Warp::compose_linear_deformation (linear_transform, warp, warp_composed); - apply_warp (input, output, warp_composed, interp, out_of_bounds_value, oversample); + apply_warp (input, output, warp_composed, interp, out_of_bounds_value, oversample, modulate_jac); if (fod_reorientation) - Registration::Transform::reorient_warp ("reorienting", output, warp_composed, directions_cartesian.transpose(), modulate); + Registration::Transform::reorient_warp (reorient_msg.c_str(), + output, warp_composed, directions_cartesian.transpose(), modulate_fod); // Apply 4D deformation field only } else { - apply_warp (input, output, warp, interp, out_of_bounds_value, oversample); + apply_warp (input, output, warp, interp, out_of_bounds_value, oversample, modulate_jac); if (fod_reorientation) - Registration::Transform::reorient_warp ("reorienting", output, warp, directions_cartesian.transpose(), modulate); + Registration::Transform::reorient_warp (reorient_msg.c_str(), + output, warp, directions_cartesian.transpose(), modulate_fod); } + DWI::export_grad_commandline (output); + // No reslicing required, so just modify the header and do a straight copy of the data } else { @@ -604,12 +674,17 @@ void run () auto output = Image::create (argument[1], output_header).with_direct_io(); copy_with_progress (input, output); - if (fod_reorientation) { + if (fod_reorientation || modulate_jac) { transform_type transform = linear_transform; if (replace) transform = linear_transform * 
output_header.transform().inverse(); - Registration::Transform::reorient ("reorienting", output, output, transform, directions_cartesian.transpose()); + if (fod_reorientation) + Registration::Transform::reorient ("reorienting", output, output, transform, directions_cartesian.transpose()); + if (modulate_jac) + apply_linear_jacobian (output, transform); } + + DWI::export_grad_commandline (output); } } diff --git a/cmd/mrview.cpp b/cmd/mrview.cpp index d77aa65b68..a34724e1e4 100644 --- a/cmd/mrview.cpp +++ b/cmd/mrview.cpp @@ -1,26 +1,29 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "gui/gui.h" #include "command.h" #include "progressbar.h" #include "memory.h" #include "gui/mrview/icons.h" #include "gui/mrview/window.h" +#include "gui/mrview/file_open.h" #include "gui/mrview/mode/list.h" #include "gui/mrview/tool/list.h" +#include "gui/mrview/sync/syncmanager.h" using namespace MR; @@ -36,7 +39,7 @@ void usage () "Max Pietsch (maximilian.pietsch@kcl.ac.uk), " "Thijs Dhollander (thijs.dhollander@gmail.com)"; - SYNOPSIS = "The MRtrix image viewer."; + SYNOPSIS = "The MRtrix image viewer"; DESCRIPTION + "Any images listed as arguments will be loaded and available through the " @@ -87,6 +90,7 @@ void usage () void run () { GUI::MRView::Window window; + MR::GUI::MRView::Sync::SyncManager sync;//sync allows syncing between mrview windows in different processes window.show(); try { window.parse_arguments(); diff --git a/cmd/mtnormalise.cpp b/cmd/mtnormalise.cpp index 65053ed257..b493566eb8 100644 --- a/cmd/mtnormalise.cpp +++ b/cmd/mtnormalise.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. 
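// Illustration only (standalone C++, not MRtrix code): the reference value defined below,
// 0.28209479177, is 1/(2*sqrt(pi)), i.e. the value of the l=0 (DC) spherical harmonic basis
// function Y00. An SH series whose DC coefficient equals this value integrates to exactly 1
// over the sphere (all higher-order terms integrate to zero), which is what the
// "SH DC term for unit angular integral" wording in the option help refers to.
#include <cmath>
#include <cstdio>

int main ()
{
  const double pi = std::acos (-1.0);
  const double c0 = 1.0 / (2.0 * std::sqrt (pi));
  std::printf ("%.11f\n", c0);                        // 0.28209479177
  std::printf ("%.11f\n", c0 * 2.0 * std::sqrt (pi)); // 1.00000000000: angular integral of c0 * Y00
}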
* - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "algo/loop.h" @@ -24,7 +25,7 @@ using namespace MR; using namespace App; -#define DEFAULT_NORM_VALUE 0.28209479177 +#define DEFAULT_REFERENCE_VALUE 0.28209479177 #define DEFAULT_MAIN_ITER_VALUE 15 #define DEFAULT_BALANCE_MAXITER_VALUE 7 #define DEFAULT_POLY_ORDER 3 @@ -33,71 +34,133 @@ const char* poly_order_choices[] = { "0", "1", "2", "3", nullptr }; void usage () { - AUTHOR = "Thijs Dhollander (thijs.dhollander@gmail.com), Rami Tabbara (rami.tabbara@florey.edu.au) and David Raffelt (david.raffelt@florey.edu.au)"; + AUTHOR = "Thijs Dhollander (thijs.dhollander@gmail.com), Rami Tabbara (rami.tabbara@florey.edu.au), " + "David Raffelt (david.raffelt@florey.edu.au), Jonas Rosnarho-Tornstrand (jonas.rosnarho-tornstrand@kcl.ac.uk) " + "and J-Donald Tournier (jdtournier@gmail.com)"; SYNOPSIS = "Multi-tissue informed log-domain intensity normalisation"; DESCRIPTION - + "This command inputs any number of tissue components (e.g. from multi-tissue CSD) " - "and outputs corresponding normalised tissue components. Intensity normalisation is " - "performed in the log-domain, and can smoothly vary spatially to accomodate the " - "effects of (residual) intensity inhomogeneities." - - + "The -mask option is mandatory and is optimally provided with a brain mask " - "(such as the one obtained from dwi2mask earlier in the processing pipeline). " - "Outlier areas with exceptionally low or high combined tissue contributions are " - "accounted for and reoptimised as the intensity inhomogeneity estimation becomes " - "more accurate." - - + "Example usage: mtnormalise wmfod.mif wmfod_norm.mif gm.mif gm_norm.mif csf.mif csf_norm.mif -mask mask.mif."; + + "This command takes as input any number of tissue components (e.g. from " + "multi-tissue CSD) and outputs corresponding normalised tissue components " + "corrected for the effects of (residual) intensity inhomogeneities. " + "Intensity normalisation is performed by optimising the voxel-wise sum of " + "all tissue compartments towards a constant value, under constraints of " + "spatial smoothness (polynomial basis of a given order). Different to " + "the Raffelt et al. 2017 abstract, this algorithm performs this task " + "in the log-domain instead, with added gradual outlier rejection, different " + "handling of the balancing factors between tissue compartments and a " + "different iteration structure." + + + "The -mask option is mandatory and is optimally provided with a brain mask " + "(such as the one obtained from dwi2mask earlier in the processing pipeline). " + "Outlier areas with exceptionally low or high combined tissue contributions are " + "accounted for and reoptimised as the intensity inhomogeneity estimation becomes " + "more accurate."; + + EXAMPLES + + Example ("Default usage (for 3-tissue CSD compartments)", + "mtnormalise wmfod.mif wmfod_norm.mif gm.mif gm_norm.mif csf.mif csf_norm.mif -mask mask.mif", + "Note how for each tissue compartment, the input and output images are provided as " + "a consecutive pair."); ARGUMENTS - + Argument ("input output", "list of all input and output tissue compartment files. 
See example usage in the description.").type_image_in().allow_multiple(); + + Argument ("input output", "list of all input and output tissue compartment files (see example usage).").type_various().allow_multiple(); OPTIONS + Option ("mask", "the mask defines the data used to compute the intensity normalisation. This option is mandatory.").required () + Argument ("image").type_image_in () - + Option ("order", "the maximum order of the polynomial basis used to fit the normalisation field in the log-domain. An order of 0 is equivalent to not allowing spatial variance of the intensity normalisation factor. (default: " + str(DEFAULT_POLY_ORDER) + ")") + + Option ("order", "the maximum order of the polynomial basis used to fit the normalisation field in the log-domain. " + "An order of 0 is equivalent to not allowing spatial variance of the intensity normalisation factor. " + "(default: " + str(DEFAULT_POLY_ORDER) + ")") + Argument ("number").type_choice (poly_order_choices) - + Option ("niter", "set the number of iterations. (default: " + str(DEFAULT_MAIN_ITER_VALUE) + ")") - + Argument ("number").type_integer() + + Option ("niter", "set the number of iterations. The first (and potentially only) entry applies to the main loop. " + "If supplied as a comma-separated list of integers, the second entry applies to the inner loop to update the balance factors " + "(default: " + str(DEFAULT_MAIN_ITER_VALUE) + "," + str(DEFAULT_BALANCE_MAXITER_VALUE) + ").") + + Argument ("number").type_sequence_int() + + + Option ("reference", "specify the (positive) reference value to which the summed tissue compartments will be normalised. " + "(default: " + str(DEFAULT_REFERENCE_VALUE, 6) + ", SH DC term for unit angular integral)") + + Argument ("number").type_float (std::numeric_limits::min()) + + + Option ("balanced", "incorporate the per-tissue balancing factors into scaling of the output images " + "(NOTE: use of this option has critical consequences for AFD intensity normalisation; " + "should not be used unless these consequences are fully understood)") + + + OptionGroup ("Debugging options") + Option ("check_norm", "output the final estimated spatially varying intensity level that is used for normalisation.") + Argument ("image").type_image_out () + Option ("check_mask", "output the final mask used to compute the normalisation. " - "This mask excludes regions identified as outliers by the optimisation process.") + "This mask excludes regions identified as outliers by the optimisation process.") + Argument ("image").type_image_out () - + Option ("value", "specify the (positive) reference value to which the summed tissue compartments will be normalised. " - "(default: " + str(DEFAULT_NORM_VALUE, 6) + ", SH DC term for unit angular integral)") - + Argument ("number").type_float (std::numeric_limits::min()); + + Option ("check_factors", "output the tissue balance factors computed during normalisation.") + + Argument ("file").type_file_out (); + + REFERENCES + + "Raffelt, D.; Dhollander, T.; Tournier, J.-D.; Tabbara, R.; Smith, R. E.; Pierre, E. & Connelly, A. " // Internal + "Bias Field Correction and Intensity Normalisation for Quantitative Analysis of Apparent Fibre Density. " + "In Proc. 
ISMRM, 2017, 26, 3541"; } +using ValueType = float; +using ImageType = Image; +using MaskType = Image; +using IndexType = Image; + +// Function to get the number of basis vectors based on the desired order +int num_basis_vec_for_order (int order) +{ + switch (order) { + case 0: return 1; + case 1: return 4; + case 2: return 10; + default: return 20; + } + assert (false); + return -1; +}; -template + + + + +// Struct to get user specified number of basis functions struct PolyBasisFunction { MEMALIGN (PolyBasisFunction) + PolyBasisFunction(const int order) : + n_basis_vecs (num_basis_vec_for_order (order)) { }; - const int n_basis_vecs = 20; + const int n_basis_vecs; - FORCE_INLINE Eigen::MatrixXd operator () (const Eigen::Vector3& pos) { + FORCE_INLINE Eigen::VectorXd operator () (const Eigen::Vector3& pos) const { double x = pos[0]; double y = pos[1]; double z = pos[2]; - Eigen::MatrixXd basis(n_basis_vecs, 1); + Eigen::VectorXd basis (n_basis_vecs); basis(0) = 1.0; + if (n_basis_vecs < 4) + return basis; + basis(1) = x; basis(2) = y; basis(3) = z; + if (n_basis_vecs < 10) + return basis; + basis(4) = x * x; basis(5) = y * y; basis(6) = z * z; basis(7) = x * y; basis(8) = x * z; basis(9) = y * z; + if (n_basis_vecs < 20) + return basis; + basis(10) = x * x * x; basis(11) = y * y * y; basis(12) = z * z * z; @@ -113,416 +176,418 @@ struct PolyBasisFunction { MEMALIGN (PolyBasisFunction) }; -template <> -struct PolyBasisFunction<0> { MEMALIGN (PolyBasisFunction<0>) - const int n_basis_vecs = 1; - - PolyBasisFunction() {} - FORCE_INLINE Eigen::MatrixXd operator () (const Eigen::Vector3&) { - Eigen::MatrixXd basis(n_basis_vecs, 1); - basis(0) = 1.0; - return basis; - } -}; -template <> -struct PolyBasisFunction<1> { MEMALIGN (PolyBasisFunction<1>) - const int n_basis_vecs = 4; - FORCE_INLINE Eigen::MatrixXd operator () (const Eigen::Vector3& pos) { - double x = pos[0]; - double y = pos[1]; - double z = pos[2]; - Eigen::MatrixXd basis(n_basis_vecs, 1); - basis(0) = 1.0; - basis(1) = x; - basis(2) = y; - basis(3) = z; - return basis; - } -}; -template <> -struct PolyBasisFunction<2> { MEMALIGN (PolyBasisFunction<2>) - const int n_basis_vecs = 10; - FORCE_INLINE Eigen::MatrixXd operator () (const Eigen::Vector3& pos) { - double x = pos[0]; - double y = pos[1]; - double z = pos[2]; - Eigen::MatrixXd basis(n_basis_vecs, 1); - basis(0) = 1.0; - basis(1) = x; - basis(2) = y; - basis(3) = z; - basis(4) = x * x; - basis(5) = y * y; - basis(6) = z * z; - basis(7) = x * y; - basis(8) = x * z; - basis(9) = y * z; - return basis; - } -}; +IndexType index_mask_voxels (size_t& num_voxels) +{ + auto opt = get_options ("mask"); + auto mask = MaskType::open (opt[0][0]); + check_effective_dimensionality (mask, 3); + if (voxel_count (mask, 0, 3) >= std::numeric_limits::max()-1) + throw Exception ("mask size exceeds maximum supported using 32-bit integer"); + num_voxels = 0; -// Removes non-physical voxels from the mask -FORCE_INLINE void refine_mask (Image& summed, - Image& initial_mask, - Image& refined_mask) { + Header header (mask); + header.ndim() = 3; + header.datatype() = DataType::UInt32; + IndexType index = IndexType::scratch (header, "index"); - for (auto i = Loop (summed, 0, 3) (summed, initial_mask, refined_mask); i; ++i) { - if (std::isfinite((float) summed.value ()) && summed.value () > 0.f && initial_mask.value ()) - refined_mask.value () = true; + for (auto l = Loop(0, 3) (mask, index); l; ++l) { + if (mask.value()) + index.value() = num_voxels++; else - refined_mask.value () = false; + 
index.value() = std::numeric_limits::max(); } + + if (!num_voxels) + throw Exception ("Mask contains no valid voxels."); + + INFO ("mask image contains " + str(num_voxels) + " voxels"); + + return index; } -template void run_primitive (); -void run () -{ - if (argument.size() % 2) - throw Exception ("The number of arguments must be even, provided as pairs of each input and its corresponding output file."); - // Get poly-order of basis function - auto opt = get_options ("order"); - if (opt.size ()) { - const int order = int(opt[0][0]); - switch (order) { - case 0: - run_primitive<0> (); - break; - case 1: - run_primitive<1> (); - break; - case 2: - run_primitive<2> (); - break; - default: - run_primitive (); - break; +Eigen::MatrixXd initialise_basis (IndexType& index, size_t num_voxels, int order) +{ + struct BasisInitialiser { NOMEMALIGN + BasisInitialiser (const Transform& transform, const PolyBasisFunction& basis_function, Eigen::MatrixXd& basis) : + basis_function (basis_function), + transform (transform), + basis (basis) { } + + void operator() (IndexType& index) { + const uint32_t idx = index.value(); + if (idx != std::numeric_limits::max()) { + assert (idx < basis.rows()); + Eigen::Vector3 vox (index.index(0), index.index(1), index.index(2)); + Eigen::Vector3 pos = transform.voxel2scanner * vox; + basis.row(idx) = basis_function (pos); + } } - } else - run_primitive (); -} + const PolyBasisFunction& basis_function; + const Transform& transform; + Eigen::MatrixXd& basis; + }; -template -void run_primitive () { + INFO ("initialising basis..."); - PolyBasisFunction basis_function; + PolyBasisFunction basis_function (order); + Transform transform (index); + Eigen::MatrixXd basis (num_voxels, num_basis_vec_for_order (order)); - using ImageType = Image; - using MaskType = Image; + ThreadedLoop (index, 0, 3, 2).run (BasisInitialiser (transform, basis_function, basis), index); + return basis; +} - vector> input_images; - vector
output_headers; - vector output_filenames; - ProgressBar input_progress ("loading input images"); - // Open input images and prepare output image headers - for (size_t i = 0; i < argument.size(); i += 2) { - input_progress++; - auto image = ImageType::open (argument[i]); - if (image.ndim () > 4) - throw Exception ("Input image \"" + image.name() + "\" contains more than 4 dimensions."); - // Elevate image dimensions to ensure it is 4-dimensional - // e.g. x,y,z -> x,y,z,1 - // This ensures consistency across multiple tissue input images - Header h_image4d (image); - h_image4d.ndim() = 4; +void load_data (Eigen::MatrixXd& data, const std::string& image_name, IndexType& index) +{ + static int num = 0; - input_images.emplace_back (image, h_image4d); + auto in = ImageType::open (image_name); + check_dimensions (index, in, 0, 3); - if (i > 0) - check_dimensions (input_images[0], input_images[i / 2], 0, 3); + struct Loader { NOMEMALIGN + public: + Loader (Eigen::MatrixXd& data, int num) : data (data), num (num) { } - if (Path::exists (argument[i + 1]) && !App::overwrite_files) - throw Exception ("Output file \"" + argument[i] + "\" already exists. (use -force option to force overwrite)"); + void operator() (ImageType& in, IndexType& index) { + const uint32_t idx = index.value(); + if (idx != std::numeric_limits::max()) + data(idx, num) = std::max (in.value(), 0.0); + } + Eigen::MatrixXd& data; + const int num; + }; - output_headers.push_back (std::move (h_image4d)); - output_filenames.push_back (argument[i + 1]); - } + ThreadedLoop (in, 0, 3, 2).run (Loader (data, num), in, index); + ++num; +} - const size_t n_tissue_types = input_images.size(); - // Load the mask and refine the initial mask to exclude non-positive summed tissue components - Header header_3D (input_images[0]); - header_3D.ndim() = 3; - auto opt = get_options ("mask"); - auto orig_mask = MaskType::open (opt[0][0]); - auto initial_mask = MaskType::scratch (orig_mask, "Initial processing mask"); - auto mask = MaskType::scratch (orig_mask, "Processing mask"); - auto prev_mask = MaskType::scratch (orig_mask, "Previous processing mask"); - { - auto summed = ImageType::scratch (header_3D, "Summed tissue volumes"); - for (size_t j = 0; j < input_images.size(); ++j) { - input_progress++; +size_t detect_outliers ( + double outlier_range, + const Eigen::MatrixXd& data, + const Eigen::VectorXd& field, + const Eigen::VectorXd& balance_factors, + Eigen::VectorXd& weights) +{ + Eigen::VectorXd summed_log = (data * balance_factors).cwiseQuotient (field).array().log(); + Eigen::VectorXd summed_log_sorted = summed_log; + const size_t lower_quartile_idx = std::round (field.size() * 0.25); + const size_t upper_quartile_idx = std::round (field.size() * 0.75); - for (auto i = Loop (0, 3) (summed, input_images[j]); i; ++i) - summed.value() += input_images[j].value(); - } - refine_mask (summed, orig_mask, initial_mask); - } + std::nth_element (summed_log_sorted.data(), summed_log_sorted.data() + lower_quartile_idx, + summed_log_sorted.data() + summed_log_sorted.size()); + double lower_quartile = summed_log_sorted[lower_quartile_idx]; - threaded_copy (initial_mask, mask); + std::nth_element (summed_log_sorted.data(), summed_log_sorted.data() + upper_quartile_idx, + summed_log_sorted.data() + summed_log_sorted.size()); + double upper_quartile = summed_log_sorted[upper_quartile_idx]; + INFO (" outlier rejection quartiles: [ " + str(lower_quartile) + " " + str(upper_quartile) + " ]"); - // Load input images into single 4d-image and zero-clamp 
combined-tissue image - Header h_combined_tissue (input_images[0]); - h_combined_tissue.ndim () = 4; - h_combined_tissue.size (3) = n_tissue_types; - auto combined_tissue = ImageType::scratch (h_combined_tissue, "Tissue components"); + double lower_outlier_threshold = lower_quartile - outlier_range * (upper_quartile - lower_quartile); + double upper_outlier_threshold = upper_quartile + outlier_range * (upper_quartile - lower_quartile); - for (size_t i = 0; i < n_tissue_types; ++i) { - input_progress++; + struct SetWeight { NOMEMALIGN + size_t& changed; + const double lower_outlier_threshold, upper_outlier_threshold; - combined_tissue.index (3) = i; - for (auto l = Loop (0, 3) (combined_tissue, input_images[i]); l; ++l) { - combined_tissue.value () = std::max(input_images[i].value (), 0.f); + double operator() (double v, double w) const { + v = std::isfinite (v) && v >= lower_outlier_threshold && v <= upper_outlier_threshold; + if (v != w) + ++changed; + return v; } - } + }; - size_t num_voxels = 0; - for (auto i = Loop (mask) (mask); i; ++i) { - if (mask.value()) - num_voxels++; - } + size_t changed = 0; + SetWeight set_weight = { changed, lower_outlier_threshold, upper_outlier_threshold }; + weights = summed_log.binaryExpr (weights, set_weight); - if (!num_voxels) - throw Exception ("Mask contains no valid voxels."); + return changed; +} - const float normalisation_value = get_option_value ("value", DEFAULT_NORM_VALUE); - const float log_norm_value = std::log (normalisation_value); - const size_t max_iter = get_option_value ("niter", DEFAULT_MAIN_ITER_VALUE); - const size_t max_balance_iter = DEFAULT_BALANCE_MAXITER_VALUE; - // Initialise normalisation fields in both image and log domain - Eigen::MatrixXd norm_field_weights; - auto norm_field_image = ImageType::scratch (header_3D, "Normalisation field (intensity)"); - auto norm_field_log = ImageType::scratch (header_3D, "Normalisation field (log-domain)"); - for (auto i = Loop(norm_field_log) (norm_field_image, norm_field_log); i; ++i) { - norm_field_image.value() = 1.f; - norm_field_log.value() = 0.f; - } - Eigen::VectorXd balance_factors (Eigen::VectorXd::Ones (n_tissue_types)); - size_t iter = 1; +void compute_balance_factors ( + const Eigen::MatrixXd& data, + const Eigen::VectorXd& field, + const Eigen::VectorXd& weights, + Eigen::VectorXd& balance_factors) +{ + Eigen::MatrixXd scaled_data = data.transpose().array().rowwise() * weights.cwiseQuotient (field).transpose().array(); + Eigen::MatrixXd HtH (data.cols(), data.cols()); + HtH.triangularView() = scaled_data * scaled_data.transpose(); + Eigen::LLT llt; + balance_factors.noalias() = llt.compute (HtH.triangularView()).solve (scaled_data * Eigen::VectorXd::Ones(field.size())); + + // Ensure our balance factors satisfy the condition that sum(log(balance_factors)) = 0 + if (!balance_factors.allFinite() || (balance_factors.array() <= 0.0).any()) + throw Exception ("Non-positive tissue balance factor was computed." + " Balance factors: " + str(balance_factors.transpose())); + + balance_factors /= std::exp (balance_factors.array().log().sum() / data.cols()); +} - // Store lambda-function for performing outlier-rejection. 
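The detect_outliers() helper shown above implements the gradual outlier rejection: each voxel's balance-weighted, field-corrected tissue sum is taken into the log domain, quartiles are found with std::nth_element, and voxels falling outside [Q1 - k*IQR, Q3 + k*IQR] receive zero weight (the command uses k = 3 for the initial pass and k = 1.5 inside the balancing loop). A standalone sketch of that quartile logic follows; it operates on an already-computed vector of log-sums, and the function and variable names are illustrative only.

// IQR-based outlier weighting in the log domain (illustrative sketch).
#include <Eigen/Dense>
#include <algorithm>
#include <cmath>

Eigen::VectorXd iqr_weights (const Eigen::VectorXd& summed_log, double outlier_range)
{
  Eigen::VectorXd sorted = summed_log;
  const Eigen::Index lower_idx = static_cast<Eigen::Index> (std::lround (sorted.size() * 0.25));
  const Eigen::Index upper_idx = static_cast<Eigen::Index> (std::lround (sorted.size() * 0.75));

  std::nth_element (sorted.data(), sorted.data() + lower_idx, sorted.data() + sorted.size());
  const double q1 = sorted[lower_idx];
  std::nth_element (sorted.data(), sorted.data() + upper_idx, sorted.data() + sorted.size());
  const double q3 = sorted[upper_idx];

  const double lower = q1 - outlier_range * (q3 - q1);
  const double upper = q3 + outlier_range * (q3 - q1);

  // weight 1 inside the fences, 0 outside (or for non-finite values)
  return summed_log.unaryExpr ([=] (double v) {
    return double (std::isfinite (v) && v >= lower && v <= upper);
  });
}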
- // We perform a coarse outlier-rejection initially as well as - // a finer outlier-rejection within each iteration of the - // tissue (re)balancing loop - auto outlier_rejection = [&](float outlier_range) { - auto summed_log = ImageType::scratch (header_3D, "Log of summed tissue volumes"); - for (auto i = Loop (0, 3) (summed_log, combined_tissue, norm_field_image); i; ++i) { - for (size_t j = 0; j < n_tissue_types; ++j) { - combined_tissue.index(3) = j; - summed_log.value() += balance_factors(j) * combined_tissue.value() / norm_field_image.value(); - } - summed_log.value() = std::log(summed_log.value()); - } - threaded_copy (initial_mask, mask); - vector summed_log_values; - summed_log_values.reserve (num_voxels); - for (auto i = Loop (0, 3) (mask, summed_log); i; ++i) { - if (mask.value()) - summed_log_values.push_back (summed_log.value()); +void update_field ( + const double log_norm_value, + const Eigen::MatrixXd& basis, + const Eigen::MatrixXd& data, + const Eigen::VectorXd& balance_factors, + const Eigen::VectorXd& weights, + Eigen::VectorXd& field_coeffs, + Eigen::VectorXd& field) +{ + struct LogWeight { NOMEMALIGN + double operator() (double sum, double weight) const { + return sum > 0.0 ? weight * (std::log(sum) - log_norm_value) : 0.0; } + const double log_norm_value; + }; + LogWeight logweight = { log_norm_value }; - num_voxels = summed_log_values.size(); - - const auto lower_quartile_it = summed_log_values.begin() + std::round ((float)num_voxels * 0.25f); - std::nth_element (summed_log_values.begin(), lower_quartile_it, summed_log_values.end()); - const float lower_quartile = *lower_quartile_it; - const auto upper_quartile_it = summed_log_values.begin() + std::round ((float)num_voxels * 0.75f); - std::nth_element (lower_quartile_it, upper_quartile_it, summed_log_values.end()); - const float upper_quartile = *upper_quartile_it; - const float lower_outlier_threshold = lower_quartile - outlier_range * (upper_quartile - lower_quartile); - const float upper_outlier_threshold = upper_quartile + outlier_range * (upper_quartile - lower_quartile); - - for (auto i = Loop (0, 3) (mask, summed_log); i; ++i) { - if (mask.value()) { - if (summed_log.value() < lower_outlier_threshold || summed_log.value() > upper_outlier_threshold) { - mask.value() = 0; - num_voxels--; - } - } - } + Eigen::VectorXd logsum = data * balance_factors; + logsum = logsum.binaryExpr (weights, logweight); - if (log_level >= 3) - display (mask); - }; + Eigen::MatrixXd HtH = Eigen::MatrixXd::Zero (basis.cols(), basis.cols()); + HtH.selfadjointView().rankUpdate ((basis.transpose().array().rowwise() * weights.transpose().array()).matrix()); - input_progress.done (); - ProgressBar progress ("performing log-domain intensity normalisation", max_iter); + Eigen::LLT llt; + field_coeffs.noalias() = llt.compute (HtH.selfadjointView()).solve (basis.transpose() * logsum); - // Perform an initial outlier rejection prior to the first iteration - outlier_rejection (3.f); + field.noalias() = (basis * field_coeffs).array().exp().matrix(); +} - threaded_copy (mask, prev_mask); - while (iter <= max_iter) { - INFO ("Iteration: " + str(iter)); - // Iteratively compute tissue balance factors with outlier rejection - size_t balance_iter = 1; - bool balance_converged = false; - while (!balance_converged && balance_iter <= max_balance_iter) { +ImageType compute_full_field (int order, const Eigen::VectorXd& field_coeffs, const IndexType& index) +{ + Header header (index); + header.datatype() = DataType::Float32; - DEBUG ("Balance and outlier 
rejection iteration " + str(balance_iter) + " starts."); + auto out = ImageType::scratch (header, "full field"); + Transform transform (out); - if (n_tissue_types > 1) { + struct FieldWriter { NOMEMALIGN + void operator() (ImageType& field) const { + Eigen::Vector3 vox (field.index(0), field.index(1), field.index(2)); + Eigen::Vector3 pos = transform.voxel2scanner * vox; + field.value() = std::exp (basis_function (pos).dot (field_coeffs)); + } - // Solve for tissue balance factors - Eigen::MatrixXd X (num_voxels, n_tissue_types); - Eigen::VectorXd y (Eigen::VectorXd::Ones (num_voxels)); - uint32_t index = 0; + const PolyBasisFunction& basis_function; + const Eigen::VectorXd& field_coeffs; + const Transform& transform; + }; - for (auto i = Loop (0, 3) (mask, combined_tissue, norm_field_image); i; ++i) { - if (mask.value()) { - for (size_t j = 0; j < n_tissue_types; ++j) { - combined_tissue.index (3) = j; - X (index, j) = combined_tissue.value() / norm_field_image.value(); - } - ++index; - } - } + PolyBasisFunction basis_function (order); - balance_factors = X.colPivHouseholderQr().solve(y); + FieldWriter writer = { basis_function, field_coeffs, transform }; - // Ensure our balance factors satisfy the condition that sum(log(balance_factors)) = 0 - double log_sum = 0.0; - for (size_t j = 0; j < n_tissue_types; ++j) { - if (balance_factors(j) <= 0.0) - throw Exception ("Non-positive tissue balance factor was computed." - " Tissue index: " + str(j+1) + " Balance factor: " + str(balance_factors(j)) + - " Needs to be strictly positive!"); - log_sum += std::log (balance_factors(j)); - } - balance_factors /= std::exp (log_sum / n_tissue_types); - } + ThreadedLoop (out, 0, 3).run (writer, out); - INFO ("Balance factors (" + str(balance_iter) + "): " + str(balance_factors.transpose())); + return out; +} - // Perform outlier rejection on log-domain of summed images - outlier_rejection(1.5f); - // Check for convergence - balance_converged = true; - for (auto i = Loop (0, 3) (mask, prev_mask); i; ++i) { - if (mask.value() != prev_mask.value()) { - balance_converged = false; - break; - } - } - threaded_copy (mask, prev_mask); - balance_iter++; - } +void write_weights (const Eigen::VectorXd& data, IndexType& index, const std::string& output_file_name) +{ + Header header (index); + header.datatype() = DataType::Float32; - // Solve for normalisation field weights in the log domain - Transform transform (mask); - Eigen::MatrixXd norm_field_basis (num_voxels, basis_function.n_basis_vecs); - Eigen::VectorXd y (num_voxels); - uint32_t index = 0; - for (auto i = Loop (0, 3) (mask, combined_tissue); i; ++i) { - if (mask.value()) { - Eigen::Vector3 vox (mask.index(0), mask.index(1), mask.index(2)); - Eigen::Vector3 pos = transform.voxel2scanner * vox; - norm_field_basis.row (index) = basis_function (pos).col(0); + auto out = ImageType::create (output_file_name, header); - double sum = 0.0; - for (size_t j = 0; j < n_tissue_types; ++j) { - combined_tissue.index(3) = j; - sum += balance_factors(j) * combined_tissue.value() ; - } - y (index++) = std::log(sum) - log_norm_value; + struct Write { NOMEMALIGN + void operator() (ImageType& out, IndexType& index) const { + const uint32_t idx = index.value(); + if (idx != std::numeric_limits::max()) { + out.value() = data[idx]; } } + const Eigen::VectorXd& data; + } write = { data }; + + ThreadedLoop (index, 0, 3).run (write, out, index); +} - norm_field_weights = norm_field_basis.colPivHouseholderQr().solve(y); - // Generate normalisation field in the log domain - for (auto 
i = Loop (0, 3) (norm_field_log); i; ++i) { - Eigen::Vector3 vox (norm_field_log.index(0), norm_field_log.index(1), norm_field_log.index(2)); - Eigen::Vector3 pos = transform.voxel2scanner * vox; - norm_field_log.value() = basis_function (pos).col(0).dot (norm_field_weights.col(0)); + +void write_output ( + const std::string& original, + const std::string& corrected, + bool output_balanced, + double balance_factor, + ImageType& field, + double lognorm_scale) +{ + using ReplicatorType = Adapter::Replicate; + + struct Scaler { NOMEMALIGN + void operator() (ImageType& original, ImageType& corrected, ReplicatorType& field) const { + corrected.value() = balance_factor * original.value() / field.value(); } + const double balance_factor; + }; - // Generate normalisation field in the image domain - for (auto i = Loop (0, 3) (norm_field_log, norm_field_image); i; ++i) - norm_field_image.value () = std::exp(norm_field_log.value()); + auto in = ImageType::open (original); + Header header (in); + header.datatype() = DataType::Float32; + header.keyval()["lognorm_scale"] = str(lognorm_scale); + if (output_balanced) + header.keyval()["lognorm_balance"] = str(balance_factor); + else + balance_factor = 1.0; + auto out = ImageType::create (corrected, header); + + Header header_broadcast (field); + header_broadcast.ndim() = 4; + header_broadcast.size(3) = in.ndim() > 3 ? in.size(3) : 1; + ReplicatorType field_broadcast (field, header_broadcast); + + Scaler scaler = { balance_factor }; + ThreadedLoop (in).run (scaler, in, out, field_broadcast); +} - progress++; - iter++; - } - progress.done(); - ProgressBar output_progress("writing output images"); - opt = get_options ("check_norm"); + + + +void run () +{ + if (argument.size() % 2) + throw Exception ("The number of arguments must be even, provided as pairs of each input and its corresponding output file."); + + const int order = get_option_value ("order", DEFAULT_POLY_ORDER); + const float reference_value = get_option_value ("reference", DEFAULT_REFERENCE_VALUE); + const float log_ref_value = std::log (reference_value); + + size_t max_iter = DEFAULT_MAIN_ITER_VALUE; + size_t max_balance_iter = DEFAULT_BALANCE_MAXITER_VALUE; + auto opt = get_options ("niter"); if (opt.size()) { - auto norm_field_output = ImageType::create (opt[0][0], header_3D); - threaded_copy (norm_field_image, norm_field_output); + vector num = opt[0][0]; + if (num.size() < 1 && num.size() > 2) + throw Exception ("unexpected number of entries provided to option \"niter\""); + for (const int n : num) + if (n < 1) + throw Exception ("number of iterations must be positive"); + + max_iter = num[0]; + if (num.size() > 1) + max_balance_iter = num[1]; } - opt = get_options ("check_mask"); - if (opt.size()) { - auto mask_output = ImageType::create (opt[0][0], mask); - threaded_copy (mask, mask_output); + // Setting the n_tissue_types + const size_t n_tissue_types = argument.size()/2; + + size_t num_voxels; + auto index = index_mask_voxels (num_voxels); + + Eigen::MatrixXd data (num_voxels, n_tissue_types); + for (size_t n = 0; n < n_tissue_types; ++n) { + if (Path::exists (argument[2*n+1]) && !App::overwrite_files) + throw Exception ("Output file \"" + argument[2*n+1] + "\" already exists. (use -force option to force overwrite)"); + load_data (data, argument[2*n], index); } - // Compute log-norm scale parameter (geometric mean of normalisation field in outlier-free mask). 
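The new update_field() above fits the smooth normalisation field in the log domain: with the polynomial basis matrix B (one row per mask voxel), binary outlier weights w, and per-voxel balanced tissue sums s, it solves the weighted normal equations B^T W B c = B^T y, where y_v = w_v (log s_v - log reference), and then exponentiates B c to obtain the multiplicative field. The Eigen sketch below mirrors that LLT-based solve but is not the command's code; B, weights and log_target are assumed inputs.

// Weighted least-squares fit of the log-field coefficients, then exponentiation
// (illustrative sketch; weights are the 0/1 outlier weights, so W*W == W).
#include <Eigen/Dense>

Eigen::VectorXd fit_log_field (const Eigen::MatrixXd& B,          // num_voxels x num_basis
                               const Eigen::VectorXd& weights,    // 0/1 outlier weights
                               const Eigen::VectorXd& log_target) // w_v * (log(sum_v) - log(ref))
{
  const Eigen::MatrixXd Bw = weights.asDiagonal() * B;             // W * B
  Eigen::MatrixXd BtB = Eigen::MatrixXd::Zero (B.cols(), B.cols());
  BtB.selfadjointView<Eigen::Lower>().rankUpdate (Bw.transpose()); // B^T W^2 B == B^T W B here
  Eigen::LLT<Eigen::MatrixXd> llt;
  const Eigen::VectorXd coeffs =
      llt.compute (BtB.selfadjointView<Eigen::Lower>()).solve (B.transpose() * log_target);
  return (B * coeffs).array().exp().matrix();                      // multiplicative field
}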
- float lognorm_scale (0.f); - if (num_voxels) { - for (auto i = Loop (0,3) (mask, norm_field_log); i; ++i) { - if (mask.value ()) - lognorm_scale += norm_field_log.value (); + auto basis = initialise_basis (index, num_voxels, order); + + struct finite_and_positive { NOMEMALIGN double operator() (double v) const { return std::isfinite(v) && v > 0.0; } }; + Eigen::VectorXd weights = data.rowwise().sum().unaryExpr (finite_and_positive()); + + Eigen::VectorXd field = Eigen::VectorXd::Ones (num_voxels); + Eigen::VectorXd field_coeffs (basis.cols()); + Eigen::VectorXd balance_factors (Eigen::VectorXd::Ones (n_tissue_types)); + + + { + size_t iter = 0; + ProgressBar progress ("performing log-domain intensity normalisation", max_iter); + + size_t outliers_changed = detect_outliers (3.0, data, field, balance_factors, weights); + + while (++iter <= max_iter) { + INFO ("Iteration: " + str(iter)); + + size_t balance_iter = 1; + + // Iteratively compute tissue balance factors with outlier rejection + do { + + DEBUG ("Balance and outlier rejection iteration " + str(balance_iter) + " starts."); + + if (n_tissue_types > 1) { + compute_balance_factors (data, field, weights, balance_factors); + INFO (" balance factors (" + str(balance_iter) + "): " + str(balance_factors.transpose())); + } + + outliers_changed = detect_outliers (1.5, data, field, balance_factors, weights); + + } while (outliers_changed && balance_iter++ < max_balance_iter); + + + update_field (log_ref_value, basis, data, balance_factors, weights, field_coeffs, field); + + progress++; } - lognorm_scale = std::exp(lognorm_scale / (float)num_voxels); } - for (size_t j = 0; j < output_filenames.size(); ++j) { - output_progress++; - output_headers[j].keyval()["lognorm_scale"] = str(lognorm_scale); - auto output_image = ImageType::create (output_filenames[j], output_headers[j]); - const size_t n_vols = input_images[j].size(3); - const Eigen::VectorXf zero_vec = Eigen::VectorXf::Zero (n_vols); + auto full_field = compute_full_field (order, field_coeffs, index); - for (auto i = Loop (0,3) (output_image, input_images[j], norm_field_image); i; ++i) { - input_images[j].index(3) = 0; + opt = get_options ("check_norm"); + if (opt.size()) { + auto out = ImageType::create (opt[0][0], full_field); + threaded_copy (full_field, out); + } - if (input_images[j].value() < 0.f) - output_image.row(3) = zero_vec; - else - output_image.row(3) = Eigen::VectorXf{input_images[j].row(3)} / norm_field_image.value(); - } + opt = get_options ("check_mask"); + if (opt.size()) + write_weights (weights, index, opt[0][0]); + + opt = get_options ("check_factors"); + if (opt.size()) { + File::OFStream factors_output (opt[0][0]); + factors_output << balance_factors.transpose() << "\n"; } + + double lognorm_scale = std::exp ((field.array().log() * weights.array()).sum() / weights.sum()); + + const bool output_balanced = get_options("balanced").size(); + for (size_t n = 0; n < n_tissue_types; ++n) + write_output (argument[2*n], argument[2*n+1], output_balanced, balance_factors[n], full_field, lognorm_scale); } + diff --git a/cmd/peaks2amp.cpp b/cmd/peaks2amp.cpp index 1dd5bfb3b4..c4d2f1bfaa 100644 --- a/cmd/peaks2amp.cpp +++ b/cmd/peaks2amp.cpp @@ -1,21 +1,23 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "algo/loop.h" +#include "fixel/helpers.h" using namespace MR; @@ -26,7 +28,7 @@ void usage () { AUTHOR = "J-Donald Tournier (jdtournier@gmail.com)"; - SYNOPSIS = "Convert peak directions image to amplitudes"; + SYNOPSIS = "Extract amplitudes from a peak directions image"; ARGUMENTS + Argument ("directions", "the input directions image. Each volume corresponds to the x, y & z " @@ -39,13 +41,15 @@ void usage () void run () { - auto dir = Image::open (argument[0]); + Header H_in = Header::open (argument[0]); + Peaks::check (H_in); + auto dir = H_in.get_image(); Header header (dir); header.size(3) = header.size(3)/3; auto amp = Image::create (argument[1], header); - + auto loop = Loop("converting directions to amplitudes", 0, 3); for (auto i = loop (dir, amp); i; ++i) { diff --git a/cmd/peaks2fixel.cpp b/cmd/peaks2fixel.cpp new file mode 100644 index 0000000000..acdc532dbc --- /dev/null +++ b/cmd/peaks2fixel.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2008-2018 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * + * MRtrix3 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/ + */ + + +#include "command.h" +#include "image.h" +#include "algo/loop.h" +#include "fixel/helpers.h" + + +using namespace MR; +using namespace App; + + +void usage () +{ + AUTHOR = "Robert E. 
Smith (robert.smith@florey.edu.au)"; + + SYNOPSIS = "Convert peak directions image to a fixel directory"; + + ARGUMENTS + + Argument ("directions", "the input directions image; each volume corresponds to the x, y & z " + "component of each direction vector in turn.").type_image_in() + + + Argument ("fixels", "the output fixel directory.").type_directory_out(); + + OPTIONS + + Option ("dataname", "the name of the output fixel data file encoding peak amplitudes") + + Argument ("path").type_text(); + +} + + + +vector get (Image& data) +{ + data.index(3) = 0; + vector result; + while (data.index(3) < data.size(3)) { + Eigen::Vector3 direction; + for (size_t axis = 0; axis != 3; ++axis) { + direction[axis] = data.value(); + data.index(3)++; + } + if (direction.allFinite() && direction.squaredNorm()) + result.push_back (direction); + } + return result; +} + + + +void run () +{ + std::string dataname = get_option_value ("dataname", ""); + + auto input_header = Header::open (argument[0]); + Peaks::check (input_header); + auto input_directions = input_header.get_image(); + uint32_t nfixels = 0; + bool all_unit_norm = true; + for (auto l = Loop("counting fixels in input image", 0, 3) (input_directions); l; ++l) { + auto dirs = get (input_directions); + nfixels += dirs.size(); + for (const auto& d : dirs) { + if (MR::abs (d.squaredNorm() - 1.0) > 1e-4) + all_unit_norm = false; + } + } + INFO ("Number of fixels in input peaks image: " + str(nfixels)); + if (all_unit_norm) { + if (dataname.size()) { + WARN ("Input peaks image appears to not include amplitude information; " + "requested data file \"" + dataname + "\" will likely contain only ones"); + } else { + INFO ("All peaks have unit norm; no need to create amplitudes fixel data file"); + } + } else if (!dataname.size()) { + dataname = "amplitudes.mif"; + INFO ("Peaks have variable amplitudes; will create additional fixel data file \"" + dataname + "\""); + } + + Fixel::check_fixel_directory (argument[1], true, true); + + // Easiest if we first make the index image + const std::string index_path = Path::join (argument[1], "index.mif"); + Header index_header (input_header); + index_header.name() = index_path; + index_header.datatype() = DataType::UInt32; + index_header.datatype().set_byte_order_native(); + index_header.size(3) = 2; + index_header.keyval()[Fixel::n_fixels_key] = str(nfixels); + auto index_image = Image::create (index_path, index_header); + + Header directions_header = Fixel::directions_header_from_index (index_header); + directions_header.datatype() = DataType::Float32; + directions_header.datatype().set_byte_order_native(); + auto directions_image = Image::create (Path::join (argument[1], "directions.mif"), directions_header); + + Image amplitudes_image; + if (dataname.size()) { + Header amplitudes_header = Fixel::data_header_from_index (index_header); + amplitudes_image = Image::create (Path::join (argument[1], dataname), amplitudes_header); + } + + uint32_t output_index = 0; + for (auto l = Loop("converting peaks to fixel format", 0, 3) (input_directions, index_image); l; ++l) { + auto dirs = get (input_directions); + index_image.index(3) = 0; index_image.value() = dirs.size(); + index_image.index(3) = 1; index_image.value() = dirs.size() ? 
output_index : 0; + for (auto d : dirs) { + directions_image.index(0) = output_index; + if (amplitudes_image.valid()) { + directions_image.row(1) = d.normalized(); + amplitudes_image.index(0) = output_index; + amplitudes_image.value() = d.norm(); + } else { + directions_image.row(1) = d; + } + ++output_index; + } + } + +} diff --git a/cmd/sh2amp.cpp b/cmd/sh2amp.cpp index eb9793622e..1e3a383a1b 100644 --- a/cmd/sh2amp.cpp +++ b/cmd/sh2amp.cpp @@ -1,24 +1,29 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "command.h" -#include "math/SH.h" #include "image.h" #include "dwi/gradient.h" +#include "dwi/shells.h" +#include "math/sphere.h" +#include "math/SH.h" + +#include "dwi/directions/file.h" using namespace MR; @@ -27,10 +32,35 @@ using namespace App; void usage () { - AUTHOR = "David Raffelt (david.raffelt@florey.edu.au)"; + AUTHOR = "David Raffelt (david.raffelt@florey.edu.au) and J-Donald Tournier (jdtournier@gmail.com)"; SYNOPSIS = "Evaluate the amplitude of an image of spherical harmonic functions along specified directions"; + DESCRIPTION + + "The input image should consist of a 4D or 5D image, with SH coefficients " + "along the 4th dimension according to the convention below. If 4D (or " + "size 1 along the 5th dimension), the program expects to be provided with " + "a single shell of directions. If 5D, each set of coefficients along the " + "5th dimension is understood to correspond to a different shell." + + "The directions can be provided as:\n" + "- a 2-column ASCII text file contained azimuth / elevation pairs (as " + "produced by dirgen)\n" + "- a 3-column ASCII text file containing x, y, z Cartesian direction " + "vectors (as produced by dirgen -cart)\n" + "- a 4-column ASCII text file containing the x, y, z, b components of a " + "full DW encoding scheme (in MRtrix format, see main documentation for " + "details).\n" + "- an image file whose header contains a valid DW encoding scheme" + + "If a full DW encoding is provided, the number of shells needs to match " + "those found in the input image of coefficients (i.e. its size along the 5th " + "dimension). If needed, the -shell option can be used to pick out the " + "specific shell(s) of interest." + + "If the input image contains multiple shells (its size along the 5th " + "dimension is greater than one), the program will expect the direction " + "set to contain multiple shells, which can only be provided as a full DW " + "encodings (the last two options in the list above)." 
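To summarise the new peaks2fixel command shown above: a peaks image stores up to N directions per voxel as 3N consecutive volumes, and the fixel directory factors this into an index image (fixel count and offset per voxel), a directions file (one unit vector per fixel) and, when the peaks carry amplitude information, a data file holding each vector's norm. The helper below sketches the per-voxel split performed by get() together with the direction/amplitude separation; it is a standalone illustration, not the command's code.

// Split a flat list of 3N peak components into unit directions plus amplitudes
// (illustrative sketch of the per-voxel peaks -> fixel decomposition).
#include <Eigen/Dense>
#include <utility>
#include <vector>

std::vector<std::pair<Eigen::Vector3d, double>>
split_peaks (const std::vector<double>& volumes)    // size is a multiple of 3
{
  std::vector<std::pair<Eigen::Vector3d, double>> fixels;
  for (size_t n = 0; n + 2 < volumes.size(); n += 3) {
    Eigen::Vector3d d (volumes[n], volumes[n+1], volumes[n+2]);
    if (!d.allFinite() || d.squaredNorm() == 0.0)
      continue;                                     // absent peak: skip
    const double amplitude = d.norm();
    fixels.emplace_back (d / amplitude, amplitude); // unit direction + its amplitude
  }
  return fixels;
}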
+ + Math::SH::encoding_description; + ARGUMENTS + Argument ("input", "the input image consisting of spherical harmonic (SH) " @@ -43,12 +73,9 @@ void usage () "functions along the specified directions.").type_image_out (); OPTIONS - + Option ("gradient", - "assume input directions are supplied as a gradient encoding file") - + Option ("nonnegative", "cap all negative amplitudes to zero") - + + DWI::GradImportOptions() + Stride::Options + DataType::options(); } @@ -57,65 +84,161 @@ void usage () using value_type = float; + + + class SH2Amp { MEMALIGN(SH2Amp) public: - template - SH2Amp (const MatrixType& dirs, const size_t lmax, bool nonneg) : - transformer (dirs.template cast(), lmax), - nonnegative (nonneg), - sh (transformer.n_SH()), - amp (transformer.n_amp()) { } + SH2Amp (const Eigen::MatrixXd& transform, bool nonneg) : + transform (transform), + nonnegative (nonneg) { } void operator() (Image& in, Image& out) { sh = in.row (3); - transformer.SH2A(amp, sh); + amp = transform * sh; if (nonnegative) - amp = amp.cwiseMax(value_type(0.0)); + amp = amp.cwiseMax(0.0); out.row (3) = amp; } private: - const Math::SH::Transform transformer; + const Eigen::MatrixXd& transform; + const bool nonnegative; + Eigen::VectorXd sh, amp; +}; + + + + + + +class SH2AmpMultiShell { MEMALIGN(SH2AmpMultiShell) + public: + SH2AmpMultiShell (const vector& dirs, const DWI::Shells& shells, bool nonneg) : + transforms (dirs), + shells (shells), + nonnegative (nonneg) { } + + void operator() (Image& in, Image& out) { + for (size_t n = 0; n < transforms.size(); ++n) { + if (in.ndim() > 4) + in.index(4) = n; + sh = in.row (3); + + amp = transforms[n] * sh; + + if (nonnegative) + amp = amp.cwiseMax(value_type(0.0)); + + for (ssize_t k = 0; k < amp.size(); ++k) { + out.index(3) = shells[n].get_volumes()[k]; + out.value() = amp[k]; + } + } + } + + private: + const vector& transforms; + const DWI::Shells& shells; const bool nonnegative; - Eigen::Matrix sh, amp; + Eigen::VectorXd sh, amp; }; + + + void run () { auto sh_data = Image::open(argument[0]); Math::SH::check (sh_data); - - Header amp_header (sh_data); + const size_t lmax = Math::SH::LforN (sh_data.size(3)); Eigen::MatrixXd directions; - - if (get_options("gradient").size()) { - Eigen::MatrixXd grad; - grad = load_matrix(argument[1]); - DWI::Shells shells (grad); - directions = DWI::gen_direction_matrix (grad, shells.largest().get_volumes()); - } else { - directions = load_matrix(argument[1]); + try { + directions = DWI::Directions::load_spherical (argument[1]); + } + catch (Exception& E) { + try { + directions = load_matrix (argument[1]); + if (directions.cols() < 4) + throw ("unable to interpret file \"" + std::string(argument[1]) + "\" as a directions or gradient file"); + } + catch (Exception& E) { + auto header = Header::open (argument[1]); + directions = DWI::get_DW_scheme (header); + } } - if (!directions.rows()) + if (!directions.size()) throw Exception ("no directions found in input directions file"); - std::stringstream dir_stream; - for (ssize_t d = 0; d < directions.rows() - 1; ++d) - dir_stream << directions(d,0) << "," << directions(d,1) << "\n"; - dir_stream << directions(directions.rows() - 1,0) << "," << directions(directions.rows() - 1,1); - amp_header.keyval().insert(std::pair ("directions", dir_stream.str())); - + Header amp_header (sh_data); + amp_header.ndim() = 4; amp_header.size(3) = directions.rows(); Stride::set_from_command_line (amp_header, Stride::contiguous_along_axis (3, amp_header)); amp_header.datatype() = 
DataType::from_command_line (DataType::Float32); - auto amp_data = Image::create(argument[2], amp_header); + if (directions.cols() == 2) { // single-shell: + + if (sh_data.ndim() > 4 && sh_data.size(4) > 1) { + Exception excp ("multi-shell input data provided with single-shell direction set"); + excp.push_back (" use full DW scheme to operate on multi-shell data"); + throw excp; + } + + amp_header.ndim() = 4; - SH2Amp sh2amp (directions, Math::SH::LforN (sh_data.size(3)), get_options("nonnegative").size()); - ThreadedLoop("computing amplitudes", sh_data, 0, 3, 2).run (sh2amp, sh_data, amp_data); + std::stringstream dir_stream; + for (ssize_t d = 0; d < directions.rows() - 1; ++d) + dir_stream << directions(d,0) << "," << directions(d,1) << "\n"; + dir_stream << directions(directions.rows() - 1,0) << "," << directions(directions.rows() - 1,1); + amp_header.keyval()["directions"] = dir_stream.str(); + auto amp_data = Image::create(argument[2], amp_header); + auto transform = Math::SH::init_transform (directions, lmax); + + SH2Amp sh2amp (transform, get_options("nonnegative").size()); + ThreadedLoop("computing amplitudes", sh_data, 0, 3, 2).run (sh2amp, sh_data, amp_data); + + } + else { // full gradient scheme: + + DWI::set_DW_scheme (amp_header, directions); + auto shells = DWI::Shells (directions).select_shells (false, false, false); + + if (shells.count() == 0) + throw Exception ("no shells found in gradient scheme"); + + if (shells.count() > 1) { + if (sh_data.ndim() < 5) + throw Exception ("multiple shells detected in gradient scheme, but only one shell in input data"); + if (sh_data.size(4) != ssize_t (shells.count())) + throw Exception ("number of shells differs between gradient scheme and input data"); + } + else if (! (sh_data.ndim() == 4 || ( sh_data.ndim() > 4 && ( sh_data.size(4) != 1 ))) ) + throw Exception ("number of shells differs between gradient scheme and input data"); + + vector transforms; + + for (size_t n = 0; n < shells.count(); ++n) { + Eigen::MatrixXd dirs (shells[n].count(), 2); + if (shells[n].is_bzero()) { + dirs.setConstant (0.0); + } + else { + for (size_t idx = 0; idx < shells[n].count(); ++idx) + Math::Sphere::cartesian2spherical (directions.row (shells[n].get_volumes()[idx]).head (3), dirs.row (idx)); + } + transforms.push_back (Math::SH::init_transform (dirs, lmax)); + } + + auto amp_data = Image::create(argument[2], amp_header); + + SH2AmpMultiShell sh2amp (transforms, shells, get_options("nonnegative").size()); + ThreadedLoop("computing amplitudes", sh_data, 0, 3).run (sh2amp, sh_data, amp_data); + + } } + diff --git a/cmd/sh2peaks.cpp b/cmd/sh2peaks.cpp index 17f483d27f..e9fb5e88f2 100644 --- a/cmd/sh2peaks.cpp +++ b/cmd/sh2peaks.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
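The reworked sh2amp above reduces, per voxel and per shell, to one matrix-vector product: amplitudes = T * sh, where T contains the real SH basis functions evaluated at the requested directions (built in the diff via Math::SH::init_transform), optionally followed by a clamp to non-negative values. A minimal sketch of that inner step with plain Eigen; constructing T itself is omitted and assumed given.

// Per-voxel SH -> amplitude evaluation (illustrative; the transform matrix T
// is assumed to have been built elsewhere, one row per sampling direction).
#include <Eigen/Dense>

Eigen::VectorXd sh_to_amplitudes (const Eigen::MatrixXd& T,    // n_dirs x n_coefs
                                  const Eigen::VectorXd& sh,   // SH coefficients of one voxel
                                  bool nonnegative)
{
  Eigen::VectorXd amp = T * sh;
  if (nonnegative)
    amp = amp.cwiseMax (0.0);        // cap negative lobes to zero
  return amp;
}

In the multi-shell case the same product is applied once per shell, with each result scattered into the output volumes belonging to that shell, as done by SH2AmpMultiShell.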
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "math/SH.h" #include "memory.h" @@ -32,7 +33,14 @@ void usage () { AUTHOR = "J-Donald Tournier (jdtournier@gmail.com)"; - SYNOPSIS = "Extract the peaks of a spherical harmonic function at each voxel, by commencing a Newton search along a set of specified directions"; + SYNOPSIS = "Extract the peaks of a spherical harmonic function in each voxel"; + + DESCRIPTION + + "Peaks of the spherical harmonic function in each voxel are located by " + "commencing a Newton search along each of a set of pre-specified directions"; + + DESCRIPTION + + Math::SH::encoding_description; ARGUMENTS + Argument ("SH", "the input image of SH coefficients.") @@ -74,6 +82,11 @@ void usage () + Option ("fast", "use lookup table to compute associated Legendre polynomials (faster, but approximate)."); + + REFERENCES + + "Jeurissen, B.; Leemans, A.; Tournier, J.-D.; Jones, D.K.; Sijbers, J. " + "Investigating the prevalence of complex fiber configurations in white matter tissue with diffusion magnetic resonance imaging. " + "Human Brain Mapping, 2013, 34(11), 2747-2766"; } diff --git a/cmd/sh2power.cpp b/cmd/sh2power.cpp index ec6b0e409d..b16e1e6125 100644 --- a/cmd/sh2power.cpp +++ b/cmd/sh2power.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "math/SH.h" @@ -31,7 +32,9 @@ void usage () DESCRIPTION + "This command computes the sum of squared SH coefficients, " "which equals the mean-squared amplitude " - "of the spherical function it represents."; + "of the spherical function it represents." + + + Math::SH::encoding_description; ARGUMENTS + Argument ("SH", "the input spherical harmonics coefficients image.").type_image_in () diff --git a/cmd/sh2response.cpp b/cmd/sh2response.cpp index 44649dbcc3..6ba82f6866 100644 --- a/cmd/sh2response.cpp +++ b/cmd/sh2response.cpp @@ -1,32 +1,31 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 
2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "exception.h" +#include "image.h" #include "mrtrix.h" #include "progressbar.h" - -#include "image.h" #include "algo/loop.h" - +#include "dwi/gradient.h" +#include "dwi/shells.h" #include "math/math.h" #include "math/SH.h" #include "math/ZSH.h" -#include "dwi/gradient.h" -#include "dwi/shells.h" @@ -46,6 +45,9 @@ void usage () SYNOPSIS = "Generate an appropriate response function from the image data for spherical deconvolution"; + DESCRIPTION + + Math::SH::encoding_description; + ARGUMENTS + Argument ("SH", "the spherical harmonic decomposition of the diffusion-weighted images").type_image_in() + Argument ("mask", "the mask containing the voxels from which to estimate the response function").type_image_in() @@ -66,7 +68,7 @@ using value_type = double; -void run () +void run () { auto SH = Image::open(argument[0]); Math::SH::check (SH); @@ -97,7 +99,7 @@ void run () auto loop = Loop ("estimating response function", SH, 0, 3); for (auto l = loop(mask, SH, dir); l; ++l) { - if (!mask.value()) + if (!mask.value()) continue; Eigen::Vector3d d = dir.row(3); @@ -128,10 +130,10 @@ void run () value_type val = AL[l] * d_dot_s / d_dot_d; response[Math::ZSH::index(l)] += val; - if (dump_stream.is_open()) + if (dump_stream.is_open()) dump_stream << val << " "; } - if (dump_stream.is_open()) + if (dump_stream.is_open()) dump_stream << "\n"; ++count; diff --git a/cmd/shbasis.cpp b/cmd/shbasis.cpp index 4c9314514c..935a52e023 100644 --- a/cmd/shbasis.cpp +++ b/cmd/shbasis.cpp @@ -1,20 +1,20 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
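Relating to the sh2power description a little further up (the sum of squared SH coefficients equals the mean-squared amplitude of the represented function): with the even-degree-only real SH packing used by these commands, degree l occupies 2l+1 consecutive coefficients starting at index l*(l-1)/2, so the power can also be broken down per harmonic degree. The packing indices here are an assumption stated for the sketch, which is illustrative rather than the command's code.

// Sum of squared SH coefficients per even harmonic degree (illustrative;
// assumes degree l occupies 2l+1 coefficients starting at index l*(l-1)/2).
#include <Eigen/Dense>
#include <vector>

std::vector<double> sh_power_per_degree (const Eigen::VectorXd& sh, int lmax)
{
  std::vector<double> power;
  for (int l = 0; l <= lmax; l += 2) {
    const int start = l * (l - 1) / 2;
    power.push_back (sh.segment (start, 2*l + 1).squaredNorm());
  }
  return power;
}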
*/ - #include "app.h" -#include "bitset.h" #include "command.h" #include "datatype.h" #include "header.h" @@ -24,8 +24,8 @@ #include "types.h" #include "algo/loop.h" - #include "math/SH.h" +#include "misc/bitset.h" using namespace MR; @@ -56,7 +56,9 @@ void usage () + "Note that the \"force_*\" conversion choices should only be used in cases where this " "command has previously been unable to automatically determine the SH basis from the " - "image data, but the user themselves are confident of the SH basis of the data."; + "image data, but the user themselves are confident of the SH basis of the data." + + + Math::SH::encoding_description; ARGUMENTS @@ -168,7 +170,7 @@ void check_and_update (Header& H, const conv_t conversion) } if (progress) - progress = NULL; + progress.reset (nullptr); // First is ratio to be used for SH basis decision, second is gradient of regression std::pair regression = std::make_pair (0.0f, 0.0f); @@ -330,5 +332,5 @@ void run () } -}; +} diff --git a/cmd/shconv.cpp b/cmd/shconv.cpp index f0d6bfba96..fbd8ee79d8 100644 --- a/cmd/shconv.cpp +++ b/cmd/shconv.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "memory.h" #include "progressbar.h" @@ -27,19 +28,31 @@ using namespace App; void usage () { - AUTHOR = "David Raffelt (david.raffelt@florey.edu.au)"; + AUTHOR = "David Raffelt (david.raffelt@florey.edu.au) and J-Donald Tournier (jdtournier@gmail.com)"; + + SYNOPSIS = "Perform spherical convolution"; + + DESCRIPTION + + "Provided with matching pairs of response function and ODF images " + "(containing SH coefficients), perform spherical convolution to provide the " + "corresponding SH coefficients of the signal." + + "If multiple pairs of inputs are provided, their contributions will be " + "summed into a single output." + + "If the responses are multi-shell (with one line of coefficients per " + "shell), the output will be a 5-dimensional image, with the SH " + "coefficients of the signal in each shell stored at different indices " + "along the 5th dimension." 
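The rewritten shconv above relies on the fact that convolution with an axially symmetric (zonal) kernel is diagonal in the SH basis: every coefficient of degree l is scaled by the kernel's l-th rotational harmonic (the ZSH2RH conversion in the code), and when several ODF/response pairs are supplied the scaled results are summed into one output. A self-contained sketch of the per-degree scaling follows, again assuming the even-degree-only packing with degree l starting at index l*(l-1)/2.

// Spherical convolution in SH space: scale each degree-l block of coefficients
// by the corresponding rotational harmonic (illustrative sketch).
#include <Eigen/Dense>

Eigen::VectorXd spherical_convolution (const Eigen::VectorXd& odf_sh, // even-degree real SH
                                       const Eigen::VectorXd& rh)     // one value per even degree
{
  Eigen::VectorXd out = Eigen::VectorXd::Zero (odf_sh.size());
  for (Eigen::Index i = 0; i < rh.size(); ++i) {
    const Eigen::Index l = 2 * i;                  // even degrees only: 0, 2, 4, ...
    const Eigen::Index start = l * (l - 1) / 2;
    if (start + 2*l + 1 > odf_sh.size())
      break;                                       // kernel has more degrees than the ODF
    out.segment (start, 2*l + 1) = rh[i] * odf_sh.segment (start, 2*l + 1);
  }
  return out;
}

Summing the outputs of several such convolutions (one per ODF/response pair) gives the combined signal, which is what the SConvFunctor accumulation loop does per output volume.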
+ + Math::SH::encoding_description; - SYNOPSIS = "Perform a spherical convolution"; + DESCRIPTION + + Math::SH::encoding_description; ARGUMENTS - + Argument ("SH_in", "the input spherical harmonics coefficients image.").type_image_in () - + Argument ("response", "the convolution kernel (response function)").type_file_in () + + Argument ("odf response", "pairs of input ODF image and corresponding responses").allow_multiple() + Argument ("SH_out", "the output spherical harmonics coefficients image.").type_image_out (); OPTIONS - + Option ("mask", "only perform computation within the specified binary brain mask image.") - + Argument ("image", "the mask image to use.").type_image_in () - + + DataType::options() + Stride::Options; } @@ -50,53 +63,82 @@ using value_type = float; class SConvFunctor { MEMALIGN(SConvFunctor) public: - SConvFunctor (const size_t n, Image& mask, - const Eigen::Matrix& response) : - image_mask (mask), - response (response), - SH_in (n), - SH_out (n) { } - - void operator() (Image& in, Image& out) { - if (image_mask.valid()) { - assign_pos_of(in).to(image_mask); - if (!image_mask.value()) { - out.row(3) = 0.0; - return; + SConvFunctor (const vector& responses, vector>& inputs) : + responses (responses), + inputs (inputs) { } + + void operator() (Image& output) + { + for (size_t n = 0; n < inputs.size(); ++n) { + assign_pos_of (output, 0, 3).to (inputs[n]); + in = inputs[n].row (3); + for (ssize_t s = 0; s < responses[n].rows(); ++s) { + Math::SH::sconv (out, responses[n].row(s), in); + if (output.ndim() > 4) + output.index(4) = s; + for (ssize_t k = 0; k < out.size(); ++k) { + output.index(3) = k; + output.value() += out[k]; + } } } - SH_in = in.row(3); - out.row(3) = Math::SH::sconv (SH_out, response, SH_in); } protected: - Image image_mask; - Eigen::Matrix response; - Eigen::Matrix SH_in, SH_out; + const vector& responses; + vector> inputs; + Eigen::VectorXd in, out; }; -void run() { - auto image_in = Image::open (argument[0]).with_direct_io (3); - Math::SH::check (image_in); - auto responseZSH = load_vector(argument[1]); - Eigen::Matrix responseRH; - Math::ZSH::ZSH2RH (responseRH, responseZSH); - auto mask = Image(); - auto opt = get_options ("mask"); - if (opt.size()) { - mask = Header::open (opt[0][0]).get_image(); - check_dimensions (image_in, mask, 0, 3); + +void run() +{ + if (!(argument.size() & size_t(1U))) + throw Exception ("unexpected number of arguments"); + + vector> inputs ((argument.size() - 1) / 2); + vector responses (inputs.size()); + + size_t lmax = 0; + for (size_t n = 0; n < inputs.size(); ++n) { + inputs[n] = Image::open (argument[2*n]); + Math::SH::check (inputs[n]); + if (inputs[n].ndim() > 4 && inputs[n].size(4) > 1) + throw Exception ("input ODF contains more than 4 dimensions"); + + responses[n] = load_matrix (argument[2*n+1]); + responses[n].conservativeResizeLike (Eigen::MatrixXd::Zero (responses[n].rows(), Math::ZSH::NforL (Math::SH::LforN (inputs[n].size (3))))); + lmax = std::max (Math::ZSH::LforN (responses[n].cols()), lmax); + + for (ssize_t k = 0; k < responses[n].rows(); ++k) + responses[n].row(k) = Math::ZSH::ZSH2RH (responses[n].row(k)); + + if (n) { + if (responses[n].rows() != responses[0].rows()) + throw Exception ("number of shells differs between response files"); + check_dimensions (inputs[n], inputs[0], 0, 3); + } } - auto header = Header(image_in); - Stride::set_from_command_line (header); - auto image_out = Image::create (argument[2], header); - - SConvFunctor sconv (image_in.size(3), mask, responseRH); - ThreadedLoop 
("performing convolution", image_in, 0, 3, 2).run (sconv, image_in, image_out); + + Header header (inputs[0]); + if (responses[0].rows() > 1) { + header.ndim() = 5; + header.size(4) = responses[0].rows(); + } + else + header.ndim() = 4; + header.size(3) = Math::SH::NforL (lmax); + Stride::set_from_command_line (header, Stride::contiguous_along_axis (3, header)); + header.datatype() = DataType::from_command_line (DataType::Float32); + + auto output = Image::create (argument[argument.size()-1], header); + + SConvFunctor sconv (responses, inputs); + ThreadedLoop ("performing spherical convolution", inputs[0], 0, 3).run (sconv, output); } diff --git a/cmd/shview.cpp b/cmd/shview.cpp index f66d6a6a25..630a456b9a 100644 --- a/cmd/shview.cpp +++ b/cmd/shview.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "gui/gui.h" #include "command.h" #include "progressbar.h" @@ -20,6 +21,7 @@ #include "math/SH.h" #include "gui/shview/icons.h" #include "gui/shview/render_window.h" +#include "gui/shview/file_open.h" using namespace MR; diff --git a/cmd/tck2connectome.cpp b/cmd/tck2connectome.cpp index d90d596561..39781dfdf7 100644 --- a/cmd/tck2connectome.cpp +++ b/cmd/tck2connectome.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include #include "command.h" @@ -46,6 +47,43 @@ void usage () SYNOPSIS = "Generate a connectome matrix from a streamlines file and a node parcellation image"; + + EXAMPLES + + Example ("Default usage", + "tck2connectome tracks.tck nodes.mif connectome.csv -tck_weights_in weights.csv -out_assignments assignments.txt", + "By default, the metric of connectivity quantified in the connectome matrix is the " + "number of streamlines; or, if tcksift2 is used, the sum of streamline weights via the " + "-tck_weights_in option. Use of the -out_assignments option is recommended as this " + "enables subsequent use of the connectome2tck command.") + + + Example ("Generate a matrix consisting of the mean streamline length between each node pair", + "tck2connectome tracks.tck nodes.mif distances.csv -scale_length -stat_edge mean", + "By multiplying the contribution of each streamline to the connectome by the length " + "of that streamline, and then, for each edge, computing the mean value across the " + "contributing streamlines, one obtains a matrix where the value in each entry is the " + "mean length across those streamlines belonging to that edge.") + + + Example ("Generate a connectome matrix where the value of connectivity is the \"mean FA\"", + "tcksample tracks.tck FA.mif mean_FA_per_streamline.csv -stat_tck mean; " + "tck2connectome tracks.tck nodes.mif mean_FA_connectome.csv -scale_file mean_FA_per_streamline.csv -stat_edge mean", + "Here, a connectome matrix that is \"weighted by FA\" is generated in multiple steps: " + "firstly, for each streamline, the value of the underlying FA image is sampled at each " + "vertex, and the mean of these values is calculated to produce a single scalar value of " + "\"mean FA\" per streamline; then, as each streamline is assigned to nodes within the " + "connectome, the magnitude of the contribution of that streamline to the matrix is " + "multiplied by the mean FA value calculated prior for that streamline; finally, for " + "each connectome edge, across the values of \"mean FA\" that were contributed by all " + "of the streamlines assigned to that particular edge, the mean value is calculated.") + + + Example ("Generate the connectivity fingerprint for streamlines seeded from a particular region", + "tck2connectome fixed_seed_tracks.tck nodes.mif fingerprint.csv -vector", + "This usage assumes that the streamlines being provided to the command have all been " + "seeded from the (effectively) same location, and as such, only the endpoint of each " + "streamline (not their starting point) is assigned based on the provided parcellation " + "image. 
Accordingly, the output file contains only a vector of connectivity values " + "rather than a matrix, since each streamline is assigned to only one node rather than two."); + + ARGUMENTS + Argument ("tracks_in", "the input track file").type_tracks_in() + Argument ("nodes_in", "the input node parcellation image").type_image_in() @@ -145,13 +183,15 @@ void execute (Image& node_image, const node_t max_node_index, const std: void run () { - auto node_image = Image::open (argument[1]); + auto node_header = Header::open (argument[1]); + MR::Connectome::check (node_header); + auto node_image = node_header.get_image(); // First, find out how many segmented nodes there are, so the matrix can be pre-allocated // Also check for node volume for all nodes vector node_volumes (1, 0); node_t max_node_index = 0; - for (auto i = Loop (node_image) (node_image); i; ++i) { + for (auto i = Loop (node_image, 0, 3) (node_image); i; ++i) { if (node_image.value() > max_node_index) { max_node_index = node_image.value(); node_volumes.resize (max_node_index + 1, 0); diff --git a/cmd/tck2fixel.cpp b/cmd/tck2fixel.cpp index 1b18af82f3..177205abe1 100644 --- a/cmd/tck2fixel.cpp +++ b/cmd/tck2fixel.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "command.h" #include "progressbar.h" #include "algo/loop.h" @@ -21,6 +22,7 @@ #include "fixel/helpers.h" #include "fixel/keys.h" #include "fixel/loop.h" +#include "fixel/types.h" #include "dwi/tractography/mapping/mapper.h" #include "dwi/tractography/mapping/loader.h" @@ -30,6 +32,8 @@ using namespace MR; using namespace App; +using Fixel::index_type; + #define DEFAULT_ANGLE_THRESHOLD 45.0 @@ -39,7 +43,7 @@ class TrackProcessor { MEMALIGN (TrackProcessor) using SetVoxelDir = DWI::Tractography::Mapping::SetVoxelDir; - TrackProcessor (Image& fixel_indexer, + TrackProcessor (Image& fixel_indexer, const vector& fixel_directions, vector& fixel_TDI, const float angular_threshold): @@ -55,15 +59,15 @@ class TrackProcessor { MEMALIGN (TrackProcessor) for (SetVoxelDir::const_iterator i = in.begin(); i != in.end(); ++i) { assign_pos_of (*i).to (fixel_indexer); fixel_indexer.index(3) = 0; - uint32_t num_fibres = fixel_indexer.value(); + index_type num_fibres = fixel_indexer.value(); if (num_fibres > 0) { fixel_indexer.index(3) = 1; - uint32_t first_index = fixel_indexer.value(); - uint32_t last_index = first_index + num_fibres; - uint32_t closest_fixel_index = 0; + index_type first_index = fixel_indexer.value(); + index_type last_index = first_index + num_fibres; + index_type closest_fixel_index = 0; float largest_dp = 0.0; const Eigen::Vector3 dir (i->get_dir().normalized()); - for (uint32_t j = first_index; j < last_index; ++j) { + for (index_type j = first_index; j < last_index; ++j) { const float dp = abs (dir.dot (fixel_directions[j])); if (dp > largest_dp) { largest_dp = dp; @@ -81,7 +85,7 @@ class TrackProcessor { MEMALIGN (TrackProcessor) private: - Image fixel_indexer; + Image fixel_indexer; const vector& fixel_directions; vector& fixel_TDI; const float angular_threshold_dp; @@ -113,7 +117,7 @@ void write_fixel_output (const std::string& filename, const Header& header) { auto output = Image::create (filename, header); - for (uint32_t i = 0; i < data.size(); ++i) { + for (size_t i = 0; i < data.size(); ++i) { output.index(0) = i; output.value() = data[i]; } @@ -125,9 +129,9 @@ void run () { const std::string input_fixel_folder = argument[1]; Header index_header = Fixel::find_index_header (input_fixel_folder); - auto index_image = index_header.get_image(); + auto index_image = index_header.get_image(); - const uint32_t num_fixels = Fixel::get_number_of_fixels (index_header); + const index_type num_fixels = Fixel::get_number_of_fixels (index_header); const float angular_threshold = get_option_value ("angle", DEFAULT_ANGLE_THRESHOLD); @@ -144,8 +148,8 @@ void run () for (auto i = Loop ("loading template fixel directions and positions", index_image, 0, 3)(index_image); i; ++i) { const Eigen::Vector3 vox ((default_type)index_image.index(0), (default_type)index_image.index(1), (default_type)index_image.index(2)); index_image.index(3) = 1; - uint32_t offset = index_image.value(); - size_t fixel_index = 0; + index_type offset = index_image.value(); + index_type fixel_index = 0; for (auto f = Fixel::Loop (index_image) (directions_data); f; ++f, ++fixel_index) { directions[offset + fixel_index] = directions_data.row(1); positions[offset + fixel_index] = image_transform.voxel2scanner * vox; diff --git a/cmd/tckconvert.cpp b/cmd/tckconvert.cpp index 4b29a1f866..1da9b61b6b 100644 --- a/cmd/tckconvert.cpp +++ b/cmd/tckconvert.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. 
* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include #include "command.h" @@ -45,7 +46,7 @@ void usage () "where square brackets denote the position of the numbering for the files, " "for example:" - + "$ tckconvert input.tck output-[].txt" + + "$ tckconvert input.tck output-'[]'.txt" + "will produce files named output-0000.txt, output-0001.txt, output-0002.txt, ..."; @@ -94,38 +95,43 @@ void usage () } + + + + + class VTKWriter: public WriterInterface { MEMALIGN(VTKWriter) -public: + public: VTKWriter(const std::string& file) : VTKout (file) { - // create and write header of VTK output file: - VTKout << - "# vtk DataFile Version 1.0\n" - "Data values for Tracks\n" - "ASCII\n" - "DATASET POLYDATA\n" - "POINTS "; - // keep track of offset to write proper value later: - offset_num_points = VTKout.tellp(); - VTKout << "XXXXXXXXXX float\n"; + // create and write header of VTK output file: + VTKout << + "# vtk DataFile Version 1.0\n" + "Data values for Tracks\n" + "ASCII\n" + "DATASET POLYDATA\n" + "POINTS "; + // keep track of offset to write proper value later: + offset_num_points = VTKout.tellp(); + VTKout << "XXXXXXXXXX float\n"; } bool operator() (const Streamline& tck) { - // write out points, and build index of tracks: - size_t start_index = current_index; - current_index += tck.size(); - track_list.push_back (std::pair (start_index, current_index)); + // write out points, and build index of tracks: + size_t start_index = current_index; + current_index += tck.size(); + track_list.push_back (std::pair (start_index, current_index)); - for (auto pos : tck) { - VTKout << pos[0] << " " << pos[1] << " " << pos[2] << "\n"; - } - return true; + for (const auto& pos : tck) { + VTKout << pos[0] << " " << pos[1] << " " << pos[2] << "\n"; + } + return true; } ~VTKWriter() { try { // write out list of tracks: VTKout << "LINES " << track_list.size() << " " << track_list.size() + current_index << "\n"; - for (const auto track : track_list) { + for (const auto& track : track_list) { VTKout << track.second - track.first << " " << track.first; for (size_t i = track.first + 1; i < track.second; ++i) VTKout << " " << i; @@ -145,7 +151,7 @@ class VTKWriter: public WriterInterface { MEMALIGN(VTKWriter) } } -private: + private: File::OFStream VTKout; size_t offset_num_points; vector> track_list; @@ -155,8 +161,12 @@ class VTKWriter: public WriterInterface { MEMALIGN(VTKWriter) + + + + class VTKReader: public ReaderInterface { MEMALIGN(VTKReader) -public: + public: VTKReader(const std::string& file) { points = NULL; lines = NULL; @@ -197,19 +207,19 @@ class VTKReader: public ReaderInterface { MEMALIGN(VTKReader) } bool operator() 
(Streamline& tck) { - tck.clear(); - if ( lineIdx < number_of_line_indices ) { - int count = lines[lineIdx]; + tck.clear(); + if ( lineIdx < number_of_line_indices ) { + int count = lines[lineIdx]; + lineIdx++; + for ( int i = 0; i < count; i++ ) { + int idx = lines[lineIdx]; + Eigen::Vector3f f ( points[idx*3], points[idx*3+1], points[idx*3+2] ); + tck.push_back(f); lineIdx++; - for ( int i = 0; i < count; i++ ) { - int idx = lines[lineIdx]; - Eigen::Vector3f f ( points[idx*3], points[idx*3+1], points[idx*3+2] ); - tck.push_back(f); - lineIdx++; - } - return true; } - return false; + return true; + } + return false; } ~VTKReader() { @@ -217,52 +227,60 @@ class VTKReader: public ReaderInterface { MEMALIGN(VTKReader) if ( lines != NULL ) { delete[] lines; } } -private: - float *points; - int *lines; - int lineIdx; - int number_of_lines; - int number_of_line_indices; + private: + float *points; + int *lines; + int lineIdx; + int number_of_lines; + int number_of_line_indices; }; + + + class ASCIIReader: public ReaderInterface { MEMALIGN(ASCIIReader) -public: + public: ASCIIReader(const std::string& file) { auto num = list.parse_scan_check(file); } bool operator() (Streamline& tck) { - tck.clear(); - if (item < list.size()) { - auto t = load_matrix(list[item].name()); - for (size_t i = 0; i < size_t(t.rows()); i++) - tck.push_back(Eigen::Vector3f(t.row(i))); - item++; - return true; - } - return false; + tck.clear(); + if (item < list.size()) { + auto t = load_matrix(list[item].name()); + for (size_t i = 0; i < size_t(t.rows()); i++) + tck.push_back(Eigen::Vector3f(t.row(i))); + item++; + return true; + } + return false; } ~ASCIIReader() { } -private: + private: File::ParsedName::List list; size_t item = 0; }; + + + + + class ASCIIWriter: public WriterInterface { MEMALIGN(ASCIIWriter) -public: + public: ASCIIWriter(const std::string& file) { - count.push_back(0); - parser.parse(file); - if (parser.ndim() != 1) - throw Exception ("output file specifier should contain one placeholder for numbering (e.g. output-[].txt)"); - parser.calculate_padding({1000000}); + count.push_back(0); + parser.parse(file); + if (parser.ndim() != 1) + throw Exception ("output file specifier should contain one placeholder for numbering (e.g. 
output-[].txt)"); + parser.calculate_padding({1000000}); } bool operator() (const Streamline& tck) { @@ -277,213 +295,220 @@ class ASCIIWriter: public WriterInterface { MEMALIGN(ASCIIWriter) ~ASCIIWriter() { } -private: + private: File::NameParser parser; vector count; }; + + + + + class PLYWriter: public WriterInterface { MEMALIGN(PLYWriter) -public: - PLYWriter(const std::string& file, int increment = 1, float radius = 0.1, int sides = 5) : out(file), increment(increment), radius(radius), sides(sides) { - vertexFilename = File::create_tempfile(0,".vertex"); - faceFilename = File::create_tempfile(0,".face"); + public: + PLYWriter(const std::string& file, int increment = 1, float radius = 0.1, int sides = 5) : + out(file), increment(increment), + radius(radius), sides(sides) { + vertexFilename = File::create_tempfile (0,"vertex"); + faceFilename = File::create_tempfile (0,"face"); vertexOF.open(vertexFilename); faceOF.open(faceFilename); num_faces = 0; num_vertices = 0; - } + } Eigen::Vector3f computeNormal ( const Streamline& tck ) { - // copy coordinates to matrix in Eigen format - size_t num_atoms = tck.size(); - Eigen::Matrix< float, Eigen::Dynamic, Eigen::Dynamic > coord(3, num_atoms); - for (size_t i = 0; i < num_atoms; ++i) { - coord.col(i) = tck[i]; - } + // copy coordinates to matrix in Eigen format + size_t num_atoms = tck.size(); + Eigen::Matrix< float, Eigen::Dynamic, Eigen::Dynamic > coord(3, num_atoms); + for (size_t i = 0; i < num_atoms; ++i) { + coord.col(i) = tck[i]; + } - // calculate centroid - Eigen::Vector3 centroid(coord.row(0).mean(), coord.row(1).mean(), coord.row(2).mean()); + // calculate centroid + Eigen::Vector3 centroid(coord.row(0).mean(), coord.row(1).mean(), coord.row(2).mean()); - // subtract centroid - coord.row(0).array() -= centroid(0); - coord.row(1).array() -= centroid(1); - coord.row(2).array() -= centroid(2); + // subtract centroid + coord.row(0).array() -= centroid(0); + coord.row(1).array() -= centroid(1); + coord.row(2).array() -= centroid(2); - // we only need the left-singular matrix here - // http://math.stackexchange.com/questions/99299/best-fitting-plane-given-a-set-of-points - auto svd = coord.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV); - Eigen::Vector3f plane_normal = svd.matrixU().rightCols<1>(); - return plane_normal; + // we only need the left-singular matrix here + // http://math.stackexchange.com/questions/99299/best-fitting-plane-given-a-set-of-points + auto svd = coord.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV); + Eigen::Vector3f plane_normal = svd.matrixU().rightCols<1>(); + return plane_normal; } void computeNormals ( const Streamline& tck, Streamline& normals) { - Eigen::Vector3f sPrev, sNext, pt1, pt2, n, normal; - sPrev = (tck[1] - tck[0]).normalized(); - - // Find a good starting normal - for (size_t idx = 1; idx < tck.size(); idx++) { - pt1 = tck[idx]; - pt2 = tck[idx+1]; - sNext = (pt2 - pt1).normalized(); - n = sPrev.cross(sNext); - if ( n.norm() > 1.0E-3 ) { - normal = n; - sPrev = sNext; - break; - } + Eigen::Vector3f sPrev, sNext, pt1, pt2, n, normal; + sPrev = (tck[1] - tck[0]).normalized(); + + // Find a good starting normal + for (size_t idx = 1; idx < tck.size(); idx++) { + pt1 = tck[idx]; + pt2 = tck[idx+1]; + sNext = (pt2 - pt1).normalized(); + n = sPrev.cross(sNext); + if ( n.norm() > 1.0E-3 ) { + normal = n; + sPrev = sNext; + break; + } + } + normal.normalize(); // vtkPolyLine.cxx:170 + for (size_t idx = 0; idx < tck.size(); idx++) { + pt1 = tck[idx]; + pt2 = tck[idx+1]; + sNext = (pt2 - 
pt1).normalized(); + + // compute rotation vector vtkPolyLine.cxx:187 + auto w = sPrev.cross(normal); + if ( w.norm() == 0.0 ) { + // copy the normal and continue + normals.push_back ( normal ); + continue; + } + // compute rotation of line segment + auto q = sNext.cross(sPrev); + if ( q.norm() == 0.0 ) { + // copy the normal and continue + normals.push_back ( normal ); + continue; + } + auto f1 = q.dot(normal); + auto f2 = 1.0 - ( f1 * f1 ); + if ( f2 > 0.0 ) { + f2 = sqrt(1.0 - (f1*f1)); + } else { + f2 = 0.0; } - normal.normalize(); // vtkPolyLine.cxx:170 - for (size_t idx = 0; idx < tck.size(); idx++) { - pt1 = tck[idx]; - pt2 = tck[idx+1]; - sNext = (pt2 - pt1).normalized(); - - // compute rotation vector vtkPolyLine.cxx:187 - auto w = sPrev.cross(normal); - if ( w.norm() == 0.0 ) { - // copy the normal and continue - normals.push_back ( normal ); - continue; - } - // compute rotation of line segment - auto q = sNext.cross(sPrev); - if ( q.norm() == 0.0 ) { - // copy the normal and continue - normals.push_back ( normal ); - continue; - } - auto f1 = q.dot(normal); - auto f2 = 1.0 - ( f1 * f1 ); - if ( f2 > 0.0 ) { - f2 = sqrt(1.0 - (f1*f1)); - } else { - f2 = 0.0; - } - auto c = (sNext + sPrev).normalized(); - w = c.cross(q); - c = sPrev.cross(q); - if ( ( normal.dot(c) * w.dot(c)) < 0 ) { - f2 = -1.0 * f2; - } - normals.push_back(normal); - sPrev = sNext; - normal = ( f1 * q ) + (f2 * w); + auto c = (sNext + sPrev).normalized(); + w = c.cross(q); + c = sPrev.cross(q); + if ( ( normal.dot(c) * w.dot(c)) < 0 ) { + f2 = -1.0 * f2; } + normals.push_back(normal); + sPrev = sNext; + normal = ( f1 * q ) + (f2 * w); + } } bool operator() (const Streamline& intck) { - // Need at least 5 points, silently ignore... - if (intck.size() < size_t(increment * 3)) { return true; } - - auto nSides = sides; - Eigen::MatrixXf coords(nSides,2); - Eigen::MatrixXi faces(nSides,6); - auto theta = 2.0 * Math::pi / float(nSides); - for ( auto i = 0; i < nSides; i++ ) { - coords(i,0) = cos((double)i*theta); - coords(i,1) = sin((double)i*theta); - // Face offsets - faces(i,0) = i; - faces(i,1) = (i+1) % nSides; - faces(i,2) = i+nSides; - faces(i,3) = (i+1) % nSides; - faces(i,4) = (i+1) % nSides + nSides; - faces(i,5) = i+nSides; - } + // Need at least 5 points, silently ignore... 
+ if (intck.size() < size_t(increment * 3)) { return true; } + + auto nSides = sides; + Eigen::MatrixXf coords(nSides,2); + Eigen::MatrixXi faces(nSides,6); + auto theta = 2.0 * Math::pi / float(nSides); + for ( auto i = 0; i < nSides; i++ ) { + coords(i,0) = cos((double)i*theta); + coords(i,1) = sin((double)i*theta); + // Face offsets + faces(i,0) = i; + faces(i,1) = (i+1) % nSides; + faces(i,2) = i+nSides; + faces(i,3) = (i+1) % nSides; + faces(i,4) = (i+1) % nSides + nSides; + faces(i,5) = i+nSides; + } - // to handle the increment, we want to keep the first 2 and last 2 points, but we can skip inside - Streamline tck; + // to handle the increment, we want to keep the first 2 and last 2 points, but we can skip inside + Streamline tck; - // Push on the first 2 points - tck.push_back(intck[0]); - tck.push_back(intck[1]); - for (size_t idx = 3; idx < intck.size() - 2; idx += increment) { - tck.push_back(intck[idx]); + // Push on the first 2 points + tck.push_back(intck[0]); + tck.push_back(intck[1]); + for (size_t idx = 3; idx < intck.size() - 2; idx += increment) { + tck.push_back(intck[idx]); + } + tck.push_back(intck[intck.size()-2]); + tck.push_back(intck[intck.size()-1]); + + Streamline normals; + this->computeNormals(tck,normals); + auto globalNormal = computeNormal(tck); + Eigen::Vector3f sNext = tck[1] - tck[0]; + auto isFirst = true; + for (size_t idx = 1; idx < tck.size() - 1; ++idx) { + auto isLast = idx == tck.size() - 2; + + // vtkTubeFilter.cxx:386 + Eigen::Vector3f p = tck[idx]; + Eigen::Vector3f pNext = tck[idx+1]; + Eigen::Vector3f sPrev = sNext; + sNext = pNext - p; + Eigen::Vector3f n = normals[idx]; + + sNext.normalize(); + if ( sNext.norm() == 0.0 ) { + continue; } - tck.push_back(intck[intck.size()-2]); - tck.push_back(intck[intck.size()-1]); - - Streamline normals; - this->computeNormals(tck,normals); - auto globalNormal = computeNormal(tck); - Eigen::Vector3f sNext = tck[1] - tck[0]; - auto isFirst = true; - for (size_t idx = 1; idx < tck.size() - 1; ++idx) { - auto isLast = idx == tck.size() - 2; - - // vtkTubeFilter.cxx:386 - Eigen::Vector3f p = tck[idx]; - Eigen::Vector3f pNext = tck[idx+1]; - Eigen::Vector3f sPrev = sNext; - sNext = pNext - p; - Eigen::Vector3f n = normals[idx]; - - sNext.normalize(); - if ( sNext.norm() == 0.0 ) { - continue; - } - // Average vectors - Eigen::Vector3f s = ( sPrev + sNext ) / 2.0; - s.normalize(); - if ( s.norm() == 0.0 ) { - s = sPrev.cross(n).normalized(); - } + // Average vectors + Eigen::Vector3f s = ( sPrev + sNext ) / 2.0; + s.normalize(); + if ( s.norm() == 0.0 ) { + s = sPrev.cross(n).normalized(); + } - auto T = s; - auto N = T.cross(globalNormal).normalized(); - auto B = T.cross(N).normalized(); - N = B.cross(T).normalized(); - - - // have our coordinate frame, now add circles - for ( auto sideIdx = 0; sideIdx < nSides; sideIdx++ ) { - auto sidePoint = p + radius * ( N * coords(sideIdx,0) + B * coords(sideIdx,1)); - vertexOF << sidePoint[0] << " "<< sidePoint[1] << " " << sidePoint[2] << " "; - vertexOF << (int)( 255 * fabs(T[0])) << " " << (int)( 255 * fabs(T[1])) << " " << (int)( 255 * fabs(T[2])) << "\n"; - if ( !isLast ) { - faceOF << "3" - << " " << num_vertices + faces(sideIdx,0) - << " " << num_vertices + faces(sideIdx,1) - << " " << num_vertices + faces(sideIdx,2) << "\n"; - faceOF << "3" - << " " << num_vertices + faces(sideIdx,3) - << " " << num_vertices + faces(sideIdx,4) - << " " << num_vertices + faces(sideIdx,5) << "\n"; - num_faces += 2; - } - } - // Cap the first point, remebering the right hand rule - 
if ( isFirst ) { - for ( auto sideIdx = nSides - 1; sideIdx >= 2; --sideIdx ) { - faceOF << "3" - << " " << num_vertices + sideIdx - << " " << num_vertices + sideIdx - 1 - << " " << num_vertices << "\n"; - } - num_faces += nSides - 2; - isFirst = false; - } - if ( isLast ) { - // faceOF << "Writing end cap, num_vertices = " << num_vertices << "\n"; - for ( auto sideIdx = 2; sideIdx <= nSides - 1; ++sideIdx ) { - faceOF << "3" - << " " << num_vertices - << " " << num_vertices + sideIdx - << " " << num_vertices + sideIdx - 1 << "\n"; - } - num_faces += nSides - 2; - } - // We needed to maintain the number of vertices for the caps, now increment for the "circles" - num_vertices += nSides; + auto T = s; + auto N = T.cross(globalNormal).normalized(); + auto B = T.cross(N).normalized(); + N = B.cross(T).normalized(); + + + // have our coordinate frame, now add circles + for ( auto sideIdx = 0; sideIdx < nSides; sideIdx++ ) { + auto sidePoint = p + radius * ( N * coords(sideIdx,0) + B * coords(sideIdx,1)); + vertexOF << sidePoint[0] << " "<< sidePoint[1] << " " << sidePoint[2] << " "; + vertexOF << (int)( 255 * fabs(T[0])) << " " << (int)( 255 * fabs(T[1])) << " " << (int)( 255 * fabs(T[2])) << "\n"; + if ( !isLast ) { + faceOF << "3" + << " " << num_vertices + faces(sideIdx,0) + << " " << num_vertices + faces(sideIdx,1) + << " " << num_vertices + faces(sideIdx,2) << "\n"; + faceOF << "3" + << " " << num_vertices + faces(sideIdx,3) + << " " << num_vertices + faces(sideIdx,4) + << " " << num_vertices + faces(sideIdx,5) << "\n"; + num_faces += 2; + } } - return true; + // Cap the first point, remebering the right hand rule + if ( isFirst ) { + for ( auto sideIdx = nSides - 1; sideIdx >= 2; --sideIdx ) { + faceOF << "3" + << " " << num_vertices + sideIdx + << " " << num_vertices + sideIdx - 1 + << " " << num_vertices << "\n"; + } + num_faces += nSides - 2; + isFirst = false; + } + if ( isLast ) { + // faceOF << "Writing end cap, num_vertices = " << num_vertices << "\n"; + for ( auto sideIdx = 2; sideIdx <= nSides - 1; ++sideIdx ) { + faceOF << "3" + << " " << num_vertices + << " " << num_vertices + sideIdx + << " " << num_vertices + sideIdx - 1 << "\n"; + } + num_faces += nSides - 2; + } + // We needed to maintain the number of vertices for the caps, now increment for the "circles" + num_vertices += nSides; + } + return true; } ~PLYWriter() { @@ -493,30 +518,30 @@ class PLYWriter: public WriterInterface { MEMALIGN(PLYWriter) faceOF.close(); out << - "ply\n" - "format ascii 1.0\n" - "comment written by tckconvert v" << App::mrtrix_version << "\n" - "comment part of the mtrix3 suite of tools (http://www.mrtrix.org/)\n" - "element vertex " << num_vertices << "\n" - "property float32 x\n" - "property float32 y\n" - "property float32 z\n" - "property uchar red\n" - "property uchar green\n" - "property uchar blue\n" - "element face " << num_faces << "\n" - "property list uint8 int32 vertex_indices\n" - "end_header\n"; + "ply\n" + "format ascii 1.0\n" + "comment written by tckconvert v" << App::mrtrix_version << "\n" + "comment part of the mtrix3 suite of tools (http://www.mrtrix.org/)\n" + "element vertex " << num_vertices << "\n" + "property float32 x\n" + "property float32 y\n" + "property float32 z\n" + "property uchar red\n" + "property uchar green\n" + "property uchar blue\n" + "element face " << num_faces << "\n" + "property list uint8 int32 vertex_indices\n" + "end_header\n"; std::ifstream vertexIF (vertexFilename); out << vertexIF.rdbuf(); vertexIF.close(); - File::unlink (vertexFilename); + 
File::remove (vertexFilename); std::ifstream faceIF (faceFilename); out << faceIF.rdbuf(); faceIF.close(); - File::unlink (faceFilename); + File::remove (faceFilename); out.close(); } catch (Exception& e) { @@ -525,7 +550,7 @@ class PLYWriter: public WriterInterface { MEMALIGN(PLYWriter) } } -private: + private: std::string vertexFilename; std::string faceFilename; File::OFStream out; @@ -539,103 +564,116 @@ class PLYWriter: public WriterInterface { MEMALIGN(PLYWriter) }; + + + + + + + + class RibWriter: public WriterInterface { MEMALIGN(RibWriter) -public: -RibWriter(const std::string& file, float radius = 0.1, bool dec = false) : out(file), writeDEC(dec), radius(radius), hasPoints(false), wroteHeader(false) { - pointsFilename = File::create_tempfile(0,".points"); - pointsOF.open(pointsFilename ); - pointsOF << "\"P\" ["; - decFilename = File::create_tempfile(0,".dec"); - decOF.open ( decFilename ); - decOF << "\"varying color dec\" ["; - // Header - out << "##RenderMan RIB\n" - << "# Written by tckconvert\n" - << "# Part of the MRtrix package (http://mrtrix.org)\n" - << "# version: " << App::mrtrix_version << "\n"; + public: + RibWriter(const std::string& file, float radius = 0.1, bool dec = false) : + out(file), writeDEC(dec), radius(radius), + hasPoints(false), wroteHeader(false) { + pointsFilename = File::create_tempfile (0,"points"); + pointsOF.open (pointsFilename); + pointsOF << "\"P\" ["; + decFilename = File::create_tempfile (0,"dec"); + decOF.open (decFilename); + decOF << "\"varying color dec\" ["; + // Header + out << "##RenderMan RIB\n" + << "# Written by tckconvert\n" + << "# Part of the MRtrix package (http://mrtrix.org)\n" + << "# version: " << App::mrtrix_version << "\n"; -} + } - bool operator() (const Streamline& tck) { - if ( tck.size() < 3 ) { - return true; - } + bool operator() (const Streamline& tck) { + if ( tck.size() < 3 ) { + return true; + } - hasPoints = true; - if ( !wroteHeader ) { - wroteHeader = true; - // Start writing the header - out << "Basis \"catmull-rom\" 1 \"catmull-rom\" 1\n" + hasPoints = true; + if ( !wroteHeader ) { + wroteHeader = true; + // Start writing the header + out << "Basis \"catmull-rom\" 1 \"catmull-rom\" 1\n" << "Attribute \"dice\" \"int roundcurve\" [1] \"int hair\" [1]\n" << "Curves \"linear\" ["; - } - out << tck.size() << " "; - Eigen::Vector3f prev = tck[1]; - for ( auto pt : tck ) { - pointsOF << pt[0] << " " << pt[1] << " " << pt[2] << " "; - // Should we write the dec? - if ( writeDEC ) { - Eigen::Vector3f T = ( prev - pt ).normalized(); - decOF << fabs(T[0]) << " " << fabs(T[1]) << " " << fabs(T[2]) << " "; - prev = pt; } + out << tck.size() << " "; + Eigen::Vector3f prev = tck[1]; + for ( auto pt : tck ) { + pointsOF << pt[0] << " " << pt[1] << " " << pt[2] << " "; + // Should we write the dec? 
+ if ( writeDEC ) { + Eigen::Vector3f T = ( prev - pt ).normalized(); + decOF << fabs(T[0]) << " " << fabs(T[1]) << " " << fabs(T[2]) << " "; + prev = pt; + } + } + return true; } - return true; - } - ~RibWriter() { - try { - if ( hasPoints ) { - pointsOF << "]\n" ; + ~RibWriter() { + try { + + if (hasPoints) { + pointsOF << "]\n" ; + decOF << "]\n" ; + } + pointsOF.close(); - decOF << "]\n" ; decOF.close(); - out << "] \"nonperiodic\" "; - std::ifstream pointsIF ( pointsFilename ); - out << pointsIF.rdbuf(); + if (hasPoints) { + out << "] \"nonperiodic\" "; - if ( writeDEC ) { - std::ifstream decIF ( decFilename ); - out << decIF.rdbuf(); - decIF.close(); - } + std::ifstream pointsIF ( pointsFilename ); + out << pointsIF.rdbuf(); - out << " \"constantwidth\" " << radius << "\n"; - } + if ( writeDEC ) { + std::ifstream decIF ( decFilename ); + out << decIF.rdbuf(); + decIF.close(); + } - out.close(); + out << " \"constantwidth\" " << radius << "\n"; + } - File::unlink (pointsFilename); - File::unlink (decFilename); + out.close(); + + File::remove (pointsFilename); + File::remove (decFilename); - } catch (Exception& e) { - e.display(); - App::exit_error_code = 1; + } catch (Exception& e) { + e.display(); + App::exit_error_code = 1; + } } - } private: - std::string pointsFilename; - std::string decFilename; - File::OFStream out; - File::OFStream pointsOF; - File::OFStream decOF; - bool writeDEC; - float radius; - bool hasPoints; - bool wroteHeader; + std::string pointsFilename; + std::string decFilename; + File::OFStream out; + File::OFStream pointsOF; + File::OFStream decOF; + bool writeDEC; + float radius; + bool hasPoints; + bool wroteHeader; }; -bool has_suffix(const std::string &str, const std::string &suffix) -{ - return str.size() >= suffix.size() && - str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0; -} + + + @@ -643,90 +681,90 @@ bool has_suffix(const std::string &str, const std::string &suffix) void run () { - // Reader - Properties properties; - std::unique_ptr > reader; - if (has_suffix(argument[0], ".tck")) { - reader.reset( new Reader(argument[0], properties) ); - } - else if (has_suffix(argument[0], ".txt")) { - reader.reset( new ASCIIReader(argument[0]) ); - } - else if (has_suffix(argument[0], ".vtk")) { - reader.reset( new VTKReader(argument[0]) ); - } - else { - throw Exception("Unsupported input file type."); - } + // Reader + Properties properties; + std::unique_ptr > reader; + if (Path::has_suffix(argument[0], ".tck")) { + reader.reset( new Reader(argument[0], properties) ); + } + else if (Path::has_suffix(argument[0], ".txt")) { + reader.reset( new ASCIIReader(argument[0]) ); + } + else if (Path::has_suffix(argument[0], ".vtk")) { + reader.reset( new VTKReader(argument[0]) ); + } + else { + throw Exception("Unsupported input file type."); + } - // Writer - std::unique_ptr > writer; - if (has_suffix(argument[1], ".tck")) { - writer.reset( new Writer(argument[1], properties) ); - } - else if (has_suffix(argument[1], ".vtk")) { - writer.reset( new VTKWriter(argument[1]) ); - } - else if (has_suffix(argument[1], ".ply")) { - auto increment = get_options("increment").size() ? get_options("increment")[0][0].as_int() : 1; - auto radius = get_options("radius").size() ? get_options("radius")[0][0].as_float() : 0.1f; - auto sides = get_options("sides").size() ? 
get_options("sides")[0][0].as_int() : 5; - writer.reset( new PLYWriter(argument[1], increment, radius, sides) ); - } - else if (has_suffix(argument[1], ".rib")) { - writer.reset( new RibWriter(argument[1]) ); - } - else if (has_suffix(argument[1], ".txt")) { - writer.reset( new ASCIIWriter(argument[1]) ); - } - else { - throw Exception("Unsupported output file type."); - } + // Writer + std::unique_ptr > writer; + if (Path::has_suffix(argument[1], ".tck")) { + writer.reset( new Writer(argument[1], properties) ); + } + else if (Path::has_suffix(argument[1], ".vtk")) { + writer.reset( new VTKWriter(argument[1]) ); + } + else if (Path::has_suffix(argument[1], ".ply")) { + auto increment = get_options("increment").size() ? get_options("increment")[0][0].as_int() : 1; + auto radius = get_options("radius").size() ? get_options("radius")[0][0].as_float() : 0.1f; + auto sides = get_options("sides").size() ? get_options("sides")[0][0].as_int() : 5; + writer.reset( new PLYWriter(argument[1], increment, radius, sides) ); + } + else if (Path::has_suffix(argument[1], ".rib")) { + writer.reset( new RibWriter(argument[1]) ); + } + else if (Path::has_suffix(argument[1], ".txt")) { + writer.reset( new ASCIIWriter(argument[1]) ); + } + else { + throw Exception("Unsupported output file type."); + } - // Tranform matrix - transform_type T; - T.setIdentity(); - size_t nopts = 0; - auto opt = get_options("scanner2voxel"); - if (opt.size()) { - auto header = Header::open(opt[0][0]); - T = Transform(header).scanner2voxel; - nopts++; - } - opt = get_options("scanner2image"); - if (opt.size()) { - auto header = Header::open(opt[0][0]); - T = Transform(header).scanner2image; - nopts++; - } - opt = get_options("voxel2scanner"); - if (opt.size()) { - auto header = Header::open(opt[0][0]); - T = Transform(header).voxel2scanner; - nopts++; - } - opt = get_options("image2scanner"); - if (opt.size()) { - auto header = Header::open(opt[0][0]); - T = Transform(header).image2scanner; - nopts++; - } - if (nopts > 1) { - throw Exception("Transform options are mutually exclusive."); - } + // Tranform matrix + transform_type T; + T.setIdentity(); + size_t nopts = 0; + auto opt = get_options("scanner2voxel"); + if (opt.size()) { + auto header = Header::open(opt[0][0]); + T = Transform(header).scanner2voxel; + nopts++; + } + opt = get_options("scanner2image"); + if (opt.size()) { + auto header = Header::open(opt[0][0]); + T = Transform(header).scanner2image; + nopts++; + } + opt = get_options("voxel2scanner"); + if (opt.size()) { + auto header = Header::open(opt[0][0]); + T = Transform(header).voxel2scanner; + nopts++; + } + opt = get_options("image2scanner"); + if (opt.size()) { + auto header = Header::open(opt[0][0]); + T = Transform(header).image2scanner; + nopts++; + } + if (nopts > 1) { + throw Exception("Transform options are mutually exclusive."); + } - // Copy - Streamline tck; - while ( (*reader)(tck) ) - { - for (auto& pos : tck) { - pos = T.cast() * pos; - } - (*writer)(tck); + // Copy + Streamline tck; + while ( (*reader)(tck) ) + { + for (auto& pos : tck) { + pos = T.cast() * pos; } + (*writer)(tck); + } } diff --git a/cmd/tckdfc.cpp b/cmd/tckdfc.cpp index cf08daf1ea..887ddadd69 100644 --- a/cmd/tckdfc.cpp +++ b/cmd/tckdfc.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "exception.h" #include "header.h" @@ -127,7 +128,7 @@ OPTIONS "the streamline trajectory until a valid timeseries is found") + Option ("upsample", - "upsample the tracks by some ratio using Hermite interpolation before mappipng \n" + "upsample the tracks by some ratio using Hermite interpolation before mapping " "(if omitted, an appropriate ratio will be determined automatically)") + Argument ("factor").type_integer (1); diff --git a/cmd/tckedit.cpp b/cmd/tckedit.cpp index 37ada0e6f8..2f313db77d 100644 --- a/cmd/tckedit.cpp +++ b/cmd/tckedit.cpp @@ -1,24 +1,25 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "command.h" #include "exception.h" #include "mrtrix.h" -#include "thread_queue.h" +#include "ordered_thread_queue.h" #include "types.h" #include "dwi/tractography/file.h" @@ -50,13 +51,46 @@ void usage () SYNOPSIS = "Perform various editing operations on track files"; DESCRIPTION - + "This command can be used to perform various manipulations on track data. " - "This includes: " - "merging data from multiple track files into one; " - "extracting only a finite number of tracks; " - "selecting a subset of tracks based on various criteria, for instance regions of interest." + + "This command can be used to perform various types of manipulations " + "on track data. 
A range of such manipulations are demonstrated in the " + "examples provided below."; + + EXAMPLES + + Example ("Concatenate data from multiple track files into one", + "tckedit *.tck all_tracks.tck", + "Here the wildcard operator is used to select all files in the " + "current working directory that have the .tck filetype suffix; but " + "input files can equivalently be specified one at a time explicitly.") + + + Example ("Extract a reduced number of streamlines", + "tckedit in_many.tck out_few.tck -number 1k -skip 500", + "The number of streamlines requested would typically be less " + "than the number of streamlines in the input track file(s); if it " + "is instead greater, then the command will issue a warning upon " + "completion. By default the streamlines for the output file are " + "extracted from the start of the input file(s); in this example the " + "command is instead instructed to skip the first 500 streamlines, and " + "write to the output file streamlines 501-1500.") + + + Example ("Extract streamlines based on selection criteria", + "tckedit in.tck out.tck -include ROI1.mif -include ROI2.mif -minlength 25", + "Multiple criteria can be added in a single invocation of tckedit, " + "and a streamline must satisfy all criteria imposed in order to be " + "written to the output file. Note that both -include and -exclude " + "options can be specified multiple times to provide multiple " + "waypoints / exclusion masks.") + + + Example ("Select only those streamline vertices within a mask", + "tckedit in.tck cropped.tck -mask mask.mif", + "The -mask option is applied to each streamline vertex independently, " + "rather than to each streamline, retaining only those streamline vertices " + "within the mask. As such, use of this option may result in a greater " + "number of output streamlines than input streamlines, as a single input " + "streamline may have the vertices at either endpoint retained but some " + "vertices at its midpoint removed, effectively cutting one long streamline " + "into multiple shorter streamlines."); + - + DWI::Tractography::preserve_track_order_desc; ARGUMENTS + Argument ("tracks_in", "the input track file(s)").type_tracks_in().allow_multiple() @@ -69,8 +103,9 @@ void usage () + WeightsOption + OptionGroup ("Other options specific to tckedit") - + Option ("inverse", "output the inverse selection of streamlines based on the criteria provided, " - "i.e. only those streamlines that fail at least one criterion will be written to file.") + + Option ("inverse", "output the inverse selection of streamlines based on the criteria provided; " + "i.e. 
only those streamlines that fail at least one selection criterion, " + "and/or vertices that are outside masks if provided, will be written to file") + Option ("ends_only", "only test the ends of each streamline against the provided include/exclude ROIs") @@ -115,31 +150,38 @@ void run () input_file_list.push_back (argument[file_index]); Properties p; - Reader reader (argument[file_index], p); + Reader (argument[file_index], p); - for (vector::const_iterator i = p.comments.begin(); i != p.comments.end(); ++i) { + for (const auto& i : p.comments) { bool present = false; - for (vector::const_iterator j = properties.comments.begin(); !present && j != properties.comments.end(); ++j) - present = (*i == *j); + for (const auto& j: properties.comments) + if ( (present = (i == j)) ) + break; if (!present) - properties.comments.push_back (*i); + properties.comments.push_back (i); } - // ROI paths are ignored - otherwise tckedit will try to find the ROIs used - // during streamlines generation! + for (const auto& i : p.prior_rois) { + const auto potential_matches = properties.prior_rois.equal_range (i.first); + bool present = false; + for (auto j = potential_matches.first; !present && j != potential_matches.second; ++j) + present = (i.second == j->second); + if (!present) + properties.prior_rois.insert (i); + } size_t this_count = 0, this_total_count = 0; - for (Properties::const_iterator i = p.begin(); i != p.end(); ++i) { - if (i->first == "count") { - this_count = to(i->second); - } else if (i->first == "total_count") { - this_total_count += to(i->second); + for (const auto& i : p) { + if (i.first == "count") { + this_count = to (i.second); + } else if (i.first == "total_count") { + this_total_count += to (i.second); } else { - Properties::iterator existing = properties.find (i->first); + auto existing = properties.find (i.first); if (existing == properties.end()) - properties.insert (*i); - else if (i->second != existing->second) + properties.insert (i); + else if (i.second != existing->second) existing->second = "variable"; } } @@ -151,6 +193,7 @@ void run () DEBUG ("estimated number of input tracks: " + str(count)); load_rois (properties); + properties.compare_stepsize_rois(); // Some properties from tracking may be overwritten by this editing process // Due to the potential use of masking, we have no choice but to clear the @@ -172,15 +215,12 @@ void run () Loader loader (input_file_list); Worker worker (properties, inverse, ends_only); - // This needs to be run AFTER creation of the Worker class - // (worker needs to be able to set max & min number of points based on step size in input file, - // receiver needs "output_step_size" field to have been updated before file creation) Receiver receiver (output_path, properties, number, skip); - Thread::run_queue ( - loader, + Thread::run_ordered_queue ( + loader, Thread::batch (Streamline<>()), - Thread::multi (worker), + Thread::multi (worker), Thread::batch (Streamline<>()), receiver); diff --git a/cmd/tckgen.cpp b/cmd/tckgen.cpp index 38f03c6bee..4bd95289c4 100644 --- a/cmd/tckgen.cpp +++ b/cmd/tckgen.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
* - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" @@ -54,12 +55,12 @@ void usage () SYNOPSIS = "Perform streamlines tractography"; - DESCRIPTION + DESCRIPTION + "By default, tckgen produces a fixed number of streamlines, by attempting " "to seed from new random locations until the target number of " "streamlines have been selected (in other words, after all inclusion & " "exclusion criteria have been applied), or the maximum number of seeds " - "has been exceeded (by default, this is 1000× the desired number of selected " + "has been exceeded (by default, this is 1000 x the desired number of selected " "streamlines). Use the -select and/or -seeds options to modify as " "required. See also the Seeding options section for alternative seeding " "strategies." @@ -128,9 +129,15 @@ void usage () "voxel for each streamline. These data are then sampled via trilinear " "interpolation at each streamline step, the diffusion tensor model is fitted, " "and the streamline follows the orientation of the principal eigenvector of " - "that tensor."; + "that tensor." + + + "Note that the behaviour of the -angle option varies slightly depending on the " + "order of integration: for any first-order method, this angle corresponds to the " + "deviation in streamline trajectory per step; for higher-order methods, this " + "corresponds to the change in underlying fibre orientation between the start and " + "end points of each step."; - REFERENCES + REFERENCES + "References based on streamlines algorithm used:" + "* FACT:\n" @@ -206,7 +213,8 @@ void usage () + DWI::Tractography::ACT::ACTOption - + DWI::Tractography::Algorithms::iFOD2Option + + DWI::Tractography::Algorithms::iFODOptions + + DWI::Tractography::Algorithms::iFOD2Options + DWI::GradImportOptions(); @@ -227,18 +235,25 @@ void run () auto opt = get_options ("algorithm"); if (opt.size()) algorithm = opt[0][0]; - load_rois (properties); - Tracking::load_streamline_properties (properties); + ACT::load_act_properties (properties); Seeding::load_seed_mechanisms (properties); Seeding::load_seed_parameters (properties); + if (algorithm == 1 || algorithm == 2) + Algorithms::load_iFOD_options (properties); if (algorithm == 2) Algorithms::load_iFOD2_options (properties); + + //load ROIs and tractography specific options + //NB must occur before seed check below due to -select option override + Tracking::load_streamline_properties_and_rois (properties); + properties.compare_stepsize_rois(); + // Check validity of options -select and -seeds; these are meaningless if seeds are number-limited // By over-riding the values in properties, the progress bar should still be valid if (properties.seeds.is_finite()) { @@ -253,6 +268,8 @@ void run () } + + switch (algorithm) { case 0: Exec ::run (argument[0], argument[1], properties); @@ -286,4 +303,3 @@ void run () } } - diff --git a/cmd/tckglobal.cpp b/cmd/tckglobal.cpp index 
9775341e5d..9569d225f7 100644 --- a/cmd/tckglobal.cpp +++ b/cmd/tckglobal.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include diff --git a/cmd/tckinfo.cpp b/cmd/tckinfo.cpp index 1c14e0ef17..2ecef1ca8c 100644 --- a/cmd/tckinfo.cpp +++ b/cmd/tckinfo.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "file/ofstream.h" @@ -54,7 +55,10 @@ void run () for (Tractography::Properties::iterator i = properties.begin(); i != properties.end(); ++i) { std::string S (i->first + ':'); S.resize (22, ' '); - std::cout << " " << S << i->second << "\n"; + const auto lines = split_lines (i->second); + std::cout << " " << S << lines[0] << "\n"; + for (size_t i = 1; i != lines.size(); ++i) + std::cout << " " << lines[i] << "\n"; } if (properties.comments.size()) { @@ -63,7 +67,7 @@ void run () std::cout << (i == properties.comments.begin() ? "" : " ") << *i << "\n"; } - for (std::multimap::const_iterator i = properties.roi.begin(); i != properties.roi.end(); ++i) + for (std::multimap::const_iterator i = properties.prior_rois.begin(); i != properties.prior_rois.end(); ++i) std::cout << " ROI: " << i->first << " " << i->second << "\n"; diff --git a/cmd/tckmap.cpp b/cmd/tckmap.cpp index 2dcc510d51..e092e72e22 100644 --- a/cmd/tckmap.cpp +++ b/cmd/tckmap.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. 
* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "command.h" diff --git a/cmd/tckresample.cpp b/cmd/tckresample.cpp index c9731e9adb..4e0f66e058 100644 --- a/cmd/tckresample.cpp +++ b/cmd/tckresample.cpp @@ -1,23 +1,24 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "math/math.h" #include "image.h" +#include "ordered_thread_queue.h" #include "thread.h" -#include "thread_queue.h" #include "dwi/tractography/file.h" #include "dwi/tractography/properties.h" #include "dwi/tractography/resampling/arc.h" @@ -44,11 +45,17 @@ void usage () SYNOPSIS = "Resample each streamline in a track file to a new set of vertices"; DESCRIPTION - + "This may be either increasing or decreasing the number of samples along " - "each streamline, or changing the positions of the samples according to " - "some specified trajectory." - - + DWI::Tractography::preserve_track_order_desc; + + "It is necessary to specify precisely ONE of the command-line options for " + "controlling how this resampling takes place; this may be either increasing " + "or decreasing the number of samples along each streamline, or may involve " + "changing the positions of the samples according to some specified trajectory." 
+ + + "Note that because the length of a streamline is calculated based on the sums of " + "distances between adjacent vertices, resampling a streamline to a new set of " + "vertices will typically change the quantified length of that streamline; the " + "magnitude of the difference will typically depend on the discrepancy in the " + "number of vertices, with less vertices leading to a shorter length (due to " + "taking chordal lengths of curved trajectories)."; ARGUMENTS + Argument ("in_tracks", "the input track file").type_tracks_in() @@ -115,33 +122,12 @@ void run () const std::unique_ptr resampler (Resampling::get_resampler()); - const float old_step_size = get_step_size (properties); - if (!std::isfinite (old_step_size)) { - INFO ("Do not have step size information from input track file"); - } - - float new_step_size = NaN; - if (dynamic_cast(resampler.get())) { - new_step_size = dynamic_cast (resampler.get())->get_step_size(); - } else if (std::isfinite (old_step_size)) { - if (dynamic_cast(resampler.get())) - new_step_size = old_step_size * dynamic_cast (resampler.get())->get_ratio(); - if (dynamic_cast(resampler.get())) - new_step_size = old_step_size / dynamic_cast (resampler.get())->get_ratio(); - } - properties["output_step_size"] = std::isfinite (new_step_size) ? str(new_step_size) : "variable"; - - auto downsample = properties.find ("downsample_factor"); - if (downsample != properties.end()) - properties.erase (downsample); - Worker worker (resampler); Receiver receiver (argument[1], properties); - Thread::run_queue (read, - Thread::batch (Streamline()), - Thread::multi (worker), - Thread::batch (Streamline()), - receiver); + Thread::run_ordered_queue (read, + Thread::batch (Streamline()), + Thread::multi (worker), + Thread::batch (Streamline()), + receiver); } - diff --git a/cmd/tcksample.cpp b/cmd/tcksample.cpp index 87dbaabed7..4753add792 100644 --- a/cmd/tcksample.cpp +++ b/cmd/tcksample.cpp @@ -1,24 +1,25 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "command.h" #include "image.h" #include "image_helpers.h" #include "memory.h" #include "thread.h" -#include "thread_queue.h" +#include "ordered_thread_queue.h" #include "dwi/tractography/file.h" #include "dwi/tractography/properties.h" #include "dwi/tractography/scalar_file.h" @@ -78,7 +79,7 @@ void usage () "processing a whole-brain tractogram, and images for which the " "quantiative parameter is additive)"); - + // TODO add support for SH amplitude along tangent // TODO add support for reading from fixel image // (this would supersede fixel2tsf when used without -precise or -stat_tck options) @@ -100,25 +101,25 @@ using vector_type = Eigen::VectorXf; -class TDI : public Image { MEMALIGN(TDI) +class TDI { MEMALIGN(TDI) public: - TDI (const Header& H, const size_t num_tracks) : - Image (Image::scratch (H, "TDI scratch image")), + TDI (Image& image, const size_t num_tracks) : + image (image), progress ("Generating initial TDI", num_tracks) { } + ~TDI () { progress.done(); } bool operator() (const DWI::Tractography::Mapping::SetVoxel& in) { - for (const auto v : in) { - assign_pos_of (v, 0, 3).to (*this); - value() += v.get_length(); + for (const auto& v : in) { + assign_pos_of (v, 0, 3).to (image); + image.value() += v.get_length(); } ++progress; return true; } - void done() { progress.done(); } - protected: + Image& image; ProgressBar progress; }; @@ -126,12 +127,12 @@ class TDI : public Image { MEMALIGN(TDI) template -class SamplerNonPrecise +class SamplerNonPrecise { MEMALIGN (SamplerNonPrecise) public: - SamplerNonPrecise (Image& image, const stat_tck statistic, MR::copy_ptr& precalc_tdi) : + SamplerNonPrecise (Image& image, const stat_tck statistic, const Image& precalc_tdi) : interp (image), - mapper (precalc_tdi ? new DWI::Tractography::Mapping::TrackMapperBase (image) : nullptr), + mapper (precalc_tdi.valid() ? new DWI::Tractography::Mapping::TrackMapperBase (image) : nullptr), tdi (precalc_tdi), statistic (statistic) { @@ -139,12 +140,12 @@ class SamplerNonPrecise mapper->set_use_precise_mapping (false); } - bool operator() (DWI::Tractography::Streamline<>& tck, std::pair& out) + bool operator() (DWI::Tractography::Streamline& tck, std::pair& out) { assert (statistic != stat_tck::NONE); - out.first = tck.index; + out.first = tck.get_index(); - std::pair values; + DWI::Tractography::TrackScalar values; (*this) (tck, values); if (statistic == MEAN) { @@ -158,7 +159,7 @@ class SamplerNonPrecise if (i < tck.size() - 1) length += (tck[i+1] - tck[i]).norm(); length *= 0.5; - integral += values.second[i] * length; + integral += values[i] * length; sum_lengths += length; } out.second = sum_lengths ? 
(integral / sum_lengths) : 0.0; @@ -166,16 +167,16 @@ class SamplerNonPrecise if (statistic == MEDIAN) { // Don't bother with a weighted median here vector data; - data.assign (values.second.data(), values.second.data() + values.second.size()); + data.assign (values.data(), values.data() + values.size()); out.second = Math::median (data); } else if (statistic == MIN) { out.second = std::numeric_limits::infinity(); for (size_t i = 0; i != tck.size(); ++i) - out.second = std::min (out.second, values.second[i]); + out.second = std::min (out.second, values[i]); } else if (statistic == MAX) { out.second = -std::numeric_limits::infinity(); for (size_t i = 0; i != tck.size(); ++i) - out.second = std::max (out.second, values.second[i]); + out.second = std::max (out.second, values[i]); } else { assert (0); } @@ -187,15 +188,15 @@ class SamplerNonPrecise return true; } - bool operator() (const DWI::Tractography::Streamline<>& tck, std::pair& out) + bool operator() (const DWI::Tractography::Streamline& tck, DWI::Tractography::TrackScalar& out) { - out.first = tck.index; - out.second.resize (tck.size()); + out.set_index (tck.get_index()); + out.resize (tck.size()); for (size_t i = 0; i != tck.size(); ++i) { if (interp.scanner (tck[i])) - out.second[i] = interp.value(); + out[i] = interp.value(); else - out.second[i] = std::numeric_limits::quiet_NaN(); + out[i] = value_type(0); } return true; } @@ -203,26 +204,26 @@ class SamplerNonPrecise private: Interp interp; std::shared_ptr mapper; - MR::copy_ptr tdi; + Image tdi; const stat_tck statistic; value_type get_tdi_multiplier (const DWI::Tractography::Mapping::Voxel& v) { - if (!tdi) + if (!tdi.valid()) return value_type(1); - assign_pos_of (v).to (*tdi); - assert (!is_out_of_bounds (*tdi)); - return v.get_length() / tdi->value(); + assign_pos_of (v).to (tdi); + assert (!is_out_of_bounds (tdi)); + return v.get_length() / tdi.value(); } }; -class SamplerPrecise +class SamplerPrecise { MEMALIGN (SamplerPrecise) public: - SamplerPrecise (Image& image, const stat_tck statistic, MR::copy_ptr& precalc_tdi) : + SamplerPrecise (Image& image, const stat_tck statistic, const Image& precalc_tdi) : image (image), mapper (new DWI::Tractography::Mapping::TrackMapperBase (image)), tdi (precalc_tdi), @@ -232,9 +233,9 @@ class SamplerPrecise mapper->set_use_precise_mapping (true); } - bool operator() (DWI::Tractography::Streamline<>& tck, std::pair& out) + bool operator() (DWI::Tractography::Streamline& tck, std::pair& out) { - out.first = tck.index; + out.first = tck.get_index(); value_type sum_lengths = value_type(0); DWI::Tractography::Mapping::SetVoxel voxels; @@ -242,7 +243,7 @@ class SamplerPrecise if (statistic == MEAN) { value_type integral = value_type(0.0); - for (const auto v : voxels) { + for (const auto& v : voxels) { assign_pos_of (v).to (image); integral += v.get_length() * (image.value() * get_tdi_multiplier (v)); sum_lengths += v.get_length(); @@ -260,7 +261,7 @@ class SamplerPrecise value_type value, length; }; vector data; - for (const auto v : voxels) { + for (const auto& v : voxels) { assign_pos_of (v).to (image); data.push_back (WeightSort (v, (image.value() * get_tdi_multiplier (v)))); sum_lengths += v.get_length(); @@ -269,7 +270,7 @@ class SamplerPrecise const value_type target_length = 0.5 * sum_lengths; sum_lengths = value_type(0.0); value_type prev_value = data.front().value; - for (const auto d : data) { + for (const auto& d : data) { if ((sum_lengths += d.length) > target_length) { out.second = prev_value; break; @@ -278,14 +279,14 @@ class 
SamplerPrecise } } else if (statistic == MIN) { out.second = std::numeric_limits::infinity(); - for (const auto v : voxels) { + for (const auto& v : voxels) { assign_pos_of (v).to (image); out.second = std::min (out.second, value_type (image.value() * get_tdi_multiplier (v))); sum_lengths += v.get_length(); } } else if (statistic == MAX) { out.second = -std::numeric_limits::infinity(); - for (const auto v : voxels) { + for (const auto& v : voxels) { assign_pos_of (v).to (image); out.second = std::max (out.second, value_type (image.value() * get_tdi_multiplier (v))); sum_lengths += v.get_length(); @@ -304,16 +305,16 @@ class SamplerPrecise private: Image image; std::shared_ptr mapper; - MR::copy_ptr tdi; + Image tdi; const stat_tck statistic; value_type get_tdi_multiplier (const DWI::Tractography::Mapping::Voxel& v) { - if (!tdi) + if (!tdi.valid()) return value_type(1); - assign_pos_of (v).to (*tdi); - assert (!is_out_of_bounds (*tdi)); - return v.get_length() / tdi->value(); + assign_pos_of (v).to (tdi); + assert (!is_out_of_bounds (tdi)); + return v.get_length() / tdi.value(); } }; @@ -381,21 +382,30 @@ class Receiver_NoStatistic : private ReceiverBase { MEMALIGN(Receiver_NoStatisti const DWI::Tractography::Properties& properties) : ReceiverBase (num_tracks) { - if (Path::has_suffix (path, ".tsf")) + if (Path::has_suffix (path, ".tsf")) { tsf.reset (new DWI::Tractography::ScalarWriter (path, properties)); - else + } else { ascii.reset (new File::OFStream (path)); + (*ascii) << "# " << App::command_history_string << "\n"; + } } Receiver_NoStatistic (const Receiver_NoStatistic&) = delete; - bool operator() (std::pair& in) + bool operator() (const DWI::Tractography::TrackScalar& in) { // Requires preservation of order - assert (in.first == ReceiverBase::received); - if (ascii) - (*ascii) << in.second.transpose() << "\n"; - else - (*tsf) (in.second); + assert (in.get_index() == ReceiverBase::received); + if (ascii) { + if (in.size()) { + auto i = in.begin(); + (*ascii) << *i; + for (++i; i != in.end(); ++i) + (*ascii) << " " << *i; + } + (*ascii) << "\n"; + } else { + (*tsf) (in); + } ++(*this); return true; } @@ -415,19 +425,13 @@ void execute_nostat (DWI::Tractography::Reader& reader, Image& image, const std::string& path) { - MR::copy_ptr no_tdi; - SamplerNonPrecise sampler (image, stat_tck::NONE, no_tdi); + SamplerNonPrecise sampler (image, stat_tck::NONE, Image()); Receiver_NoStatistic receiver (path, num_tracks, properties); - DWI::Tractography::Streamline tck; - std::pair values; - size_t counter = 0; - while (reader (tck)) { - sampler (tck, values); - receiver (values); - ++counter; - } - if (counter != num_tracks) - WARN ("Expected " + str(num_tracks) + " tracks based on header; read " + str(counter)); + Thread::run_ordered_queue (reader, + Thread::batch (DWI::Tractography::Streamline()), + Thread::multi (sampler), + Thread::batch (DWI::Tractography::TrackScalar()), + receiver); } template @@ -435,16 +439,16 @@ void execute (DWI::Tractography::Reader& reader, const size_t num_tracks, Image& image, const stat_tck statistic, - MR::copy_ptr& tdi, + Image& tdi, const std::string& path) { SamplerType sampler (image, statistic, tdi); Receiver_Statistic receiver (num_tracks); - Thread::run_queue (reader, - Thread::batch (DWI::Tractography::Streamline()), - Thread::multi (sampler), - Thread::batch (std::pair()), - receiver); + Thread::run_ordered_queue (reader, + Thread::batch (DWI::Tractography::Streamline()), + Thread::multi (sampler), + Thread::batch (std::pair()), + receiver); 
receiver.save (path); } @@ -471,20 +475,20 @@ void run () if (statistic == stat_tck::NONE && interp == interp_type::PRECISE) throw Exception ("Precise streamline mapping may only be used with per-streamline statistics"); - MR::copy_ptr tdi; + Image tdi; if (get_options ("use_tdi_fraction").size()) { if (statistic == stat_tck::NONE) throw Exception ("Cannot use -use_tdi_fraction option unless a per-streamline statistic is used"); DWI::Tractography::Reader tdi_reader (argument[0], properties); DWI::Tractography::Mapping::TrackMapperBase mapper (H); mapper.set_use_precise_mapping (interp == interp_type::PRECISE); - tdi.reset (new TDI (H, num_tracks)); + tdi = Image::scratch (H, "TDI scratch image"); + TDI tdi_fill (tdi, num_tracks); Thread::run_queue (tdi_reader, Thread::batch (DWI::Tractography::Streamline()), Thread::multi (mapper), Thread::batch (DWI::Tractography::Mapping::SetVoxel()), - *tdi); - tdi->done(); + tdi_fill); } if (statistic == stat_tck::NONE) { diff --git a/cmd/tcksift.cpp b/cmd/tcksift.cpp index 6f6b2725ea..0cba161cc2 100644 --- a/cmd/tcksift.cpp +++ b/cmd/tcksift.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "types.h" diff --git a/cmd/tcksift2.cpp b/cmd/tcksift2.cpp index 54aa80b00b..48a02a31eb 100644 --- a/cmd/tcksift2.cpp +++ b/cmd/tcksift2.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
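
The tckresample and tcksample hunks above replace Thread::run_queue with Thread::run_ordered_queue, the point being that results are emitted in the same order the streamlines were read even though multiple worker threads process them concurrently. The sketch below illustrates that idea using only the standard library; it is a generic illustration under that assumption, not the MRtrix3 Thread API, and the squaring step merely stands in for per-streamline processing:

// emit results in input order despite out-of-order completion by workers
#include <condition_variable>
#include <cstddef>
#include <iostream>
#include <map>
#include <mutex>
#include <thread>
#include <vector>

int main ()
{
  const std::size_t num_items = 20;

  std::mutex mutex;
  std::condition_variable cv;
  std::map<std::size_t, int> results;   // finished results, keyed by input index
  std::size_t next_to_fetch = 0;        // next input index for a worker to claim

  // workers claim items in order but may finish in any order
  auto worker = [&] () {
    while (true) {
      std::size_t index;
      {
        std::lock_guard<std::mutex> lock (mutex);
        if (next_to_fetch >= num_items)
          return;
        index = next_to_fetch++;
      }
      const int result = int (index) * int (index);   // stand-in for per-item processing
      {
        std::lock_guard<std::mutex> lock (mutex);
        results[index] = result;
      }
      cv.notify_all();
    }
  };

  std::vector<std::thread> pool;
  for (std::size_t n = 0; n != 4; ++n)
    pool.emplace_back (worker);

  // the "receiver" consumes results strictly in input order
  for (std::size_t next_to_write = 0; next_to_write != num_items; ++next_to_write) {
    std::unique_lock<std::mutex> lock (mutex);
    cv.wait (lock, [&] { return results.count (next_to_write) != 0; });
    std::cout << next_to_write << " -> " << results[next_to_write] << "\n";
    results.erase (next_to_write);
  }

  for (auto& thread : pool)
    thread.join();
}
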
*/ - #include "command.h" #include "exception.h" #include "image.h" @@ -98,7 +99,10 @@ const OptionGroup SIFT2AlgorithmOption = OptionGroup ("Options for controlling t + Option ("min_cf_decrease", "minimum decrease in the cost function (as a fraction of the initial value) that must occur each iteration for the algorithm to continue " "(default: " + str(SIFT2_MIN_CF_DECREASE_DEFAULT, 2) + ")") - + Argument ("frac").type_float (0.0, 1.0); + + Argument ("frac").type_float (0.0, 1.0) + + + Option ("linear", "perform a linear estimation of streamline weights, rather than the standard non-linear optimisation " + "(typically does not provide as accurate a model fit; but only requires a single pass)"); @@ -109,7 +113,7 @@ void usage () AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)"; - SYNOPSIS = "Successor to the SIFT method; instead of removing streamlines, use an EM framework to find an appropriate cross-section multiplier for each streamline"; + SYNOPSIS = "Optimise per-streamline cross-section multipliers to match a whole-brain tractogram to fixel-wise fibre densities"; REFERENCES + "Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. " // Internal @@ -173,44 +177,52 @@ void run () if (output_debug) tckfactor.output_all_debug_images ("before"); - auto opt = get_options ("csv"); - if (opt.size()) - tckfactor.set_csv_path (opt[0][0]); - - const float reg_tikhonov = get_option_value ("reg_tikhonov", SIFT2_REGULARISATION_TIKHONOV_DEFAULT); - const float reg_tv = get_option_value ("reg_tv", SIFT2_REGULARISATION_TV_DEFAULT); - tckfactor.set_reg_lambdas (reg_tikhonov, reg_tv); - - opt = get_options ("min_iters"); - if (opt.size()) - tckfactor.set_min_iters (int(opt[0][0])); - opt = get_options ("max_iters"); - if (opt.size()) - tckfactor.set_max_iters (int(opt[0][0])); - opt = get_options ("min_factor"); - if (opt.size()) - tckfactor.set_min_factor (float(opt[0][0])); - opt = get_options ("min_coeff"); - if (opt.size()) - tckfactor.set_min_coeff (float(opt[0][0])); - opt = get_options ("max_factor"); - if (opt.size()) - tckfactor.set_max_factor (float(opt[0][0])); - opt = get_options ("max_coeff"); - if (opt.size()) - tckfactor.set_max_coeff (float(opt[0][0])); - opt = get_options ("max_coeff_step"); - if (opt.size()) - tckfactor.set_max_coeff_step (float(opt[0][0])); - opt = get_options ("min_cf_decrease"); - if (opt.size()) - tckfactor.set_min_cf_decrease (float(opt[0][0])); + if (get_options ("linear").size()) { + + tckfactor.calc_afcsa(); + + } else { + + auto opt = get_options ("csv"); + if (opt.size()) + tckfactor.set_csv_path (opt[0][0]); + + const float reg_tikhonov = get_option_value ("reg_tikhonov", SIFT2_REGULARISATION_TIKHONOV_DEFAULT); + const float reg_tv = get_option_value ("reg_tv", SIFT2_REGULARISATION_TV_DEFAULT); + tckfactor.set_reg_lambdas (reg_tikhonov, reg_tv); + + opt = get_options ("min_iters"); + if (opt.size()) + tckfactor.set_min_iters (int(opt[0][0])); + opt = get_options ("max_iters"); + if (opt.size()) + tckfactor.set_max_iters (int(opt[0][0])); + opt = get_options ("min_factor"); + if (opt.size()) + tckfactor.set_min_factor (float(opt[0][0])); + opt = get_options ("min_coeff"); + if (opt.size()) + tckfactor.set_min_coeff (float(opt[0][0])); + opt = get_options ("max_factor"); + if (opt.size()) + tckfactor.set_max_factor (float(opt[0][0])); + opt = get_options ("max_coeff"); + if (opt.size()) + tckfactor.set_max_coeff (float(opt[0][0])); + opt = get_options ("max_coeff_step"); + if (opt.size()) + tckfactor.set_max_coeff_step (float(opt[0][0])); + opt = 
get_options ("min_cf_decrease"); + if (opt.size()) + tckfactor.set_min_cf_decrease (float(opt[0][0])); + + tckfactor.estimate_factors(); - tckfactor.estimate_factors(); + } tckfactor.output_factors (argument[2]); - opt = get_options ("out_coeffs"); + auto opt = get_options ("out_coeffs"); if (opt.size()) tckfactor.output_coefficients (opt[0][0]); diff --git a/cmd/tckstats.cpp b/cmd/tckstats.cpp index 8ec420d69d..ed3d83b8b6 100644 --- a/cmd/tckstats.cpp +++ b/cmd/tckstats.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "memory.h" #include "progressbar.h" @@ -47,7 +48,7 @@ void usage () AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)"; - SYNOPSIS = "Calculate statistics on streamlines length"; + SYNOPSIS = "Calculate statistics on streamlines lengths"; ARGUMENTS + Argument ("tracks_in", "the input track file").type_tracks_in(); @@ -65,9 +66,6 @@ void usage () + Option ("dump", "dump the streamlines lengths to a text file") + Argument ("path").type_file_out() - + Option ("explicit", "explicitly calculate the length of each streamline, " - "ignoring any step size information present in the header") - + Option ("ignorezero", "do not generate a warning if the track file contains streamlines with zero length") + Tractography::TrackWeightsInOption; @@ -110,8 +108,9 @@ void run () size_t count = 0, header_count = 0; float min_length = std::numeric_limits::infinity(); float max_length = -std::numeric_limits::infinity(); - double sum_lengths = 0.0, sum_weights = 0.0; - vector histogram; + size_t empty_streamlines = 0, zero_length_streamlines = 0; + default_type sum_lengths = 0.0, sum_weights = 0.0; + vector histogram; vector all_lengths; all_lengths.reserve (header_count); @@ -122,14 +121,9 @@ void run () if (properties.find ("count") != properties.end()) header_count = to (properties["count"]); - if (!get_options ("explicit").size()) { - step_size = get_step_size (properties); - if (!std::isfinite (step_size) || !step_size) { - INFO ("Streamline step size undefined in header; lengths will be calculated manually"); - if (get_options ("histogram").size()) { - WARN ("Do not have streamline step size with which to construct histogram; histogram will be generated using 1mm bin widths"); - } - } + step_size = properties.get_stepsize(); + if ((!std::isfinite (step_size) || !step_size) && get_options ("histogram").size()) { + WARN ("Do not have streamline step size with which to bin histogram; histogram will be generated using 1mm bin widths"); } std::unique_ptr dump; @@ 
-141,7 +135,7 @@ void run () Streamline<> tck; while (reader (tck)) { ++count; - const float length = std::isfinite (step_size) ? tck.calc_length (step_size) : tck.calc_length(); + const float length = Tractography::length (tck); if (std::isfinite (length)) { min_length = std::min (min_length, length); max_length = std::max (max_length, length); @@ -152,6 +146,10 @@ void run () while (histogram.size() <= index) histogram.push_back (0.0); histogram[index] += tck.weight; + if (!length) + ++zero_length_streamlines; + } else { + ++empty_streamlines; } if (dump) (*dump) << length << "\n"; @@ -159,8 +157,17 @@ void run () } } - if (histogram.size() && histogram.front() && !get_options ("ignorezero").size()) - WARN ("read " + str(histogram.front()) + " zero-length tracks"); + if (!get_options ("ignorezero").size() && (empty_streamlines || zero_length_streamlines)) { + std::string s ("read"); + if (empty_streamlines) { + s += " " + str(empty_streamlines) + " empty streamlines"; + if (zero_length_streamlines) + s += " and"; + } + if (zero_length_streamlines) + s += " " + str(zero_length_streamlines) + " streamlines with zero length (one vertex only)"; + WARN (s); + } if (count != header_count) WARN ("expected " + str(header_count) + " tracks according to header; read " + str(count)); if (!std::isfinite (min_length)) @@ -176,7 +183,7 @@ void run () // Perform a weighted median calculation std::sort (all_lengths.begin(), all_lengths.end()); size_t median_index = 0; - double sum = sum_weights - all_lengths[0].get_weight(); + default_type sum = sum_weights - all_lengths[0].get_weight(); while (sum > 0.5 * sum_weights) { sum -= all_lengths[++median_index].get_weight(); } median_length = all_lengths[median_index].get_length(); } else { @@ -186,10 +193,10 @@ void run () median_length = NaN; } - double stdev = 0.0; + default_type ssd = 0.0; for (vector::const_iterator i = all_lengths.begin(); i != all_lengths.end(); ++i) - stdev += i->get_weight() * Math::pow2 (i->get_length() - mean_length); - stdev = sum_weights ? (std::sqrt (stdev / (((count - 1) / float(count)) * sum_weights))) : NaN; + ssd += i->get_weight() * Math::pow2 (i->get_length() - mean_length); + const float stdev = sum_weights ? (std::sqrt (ssd / (((count - 1) / default_type(count)) * sum_weights))) : NaN; vector fields; auto opt = get_options ("output"); @@ -231,6 +238,7 @@ void run () opt = get_options ("histogram"); if (opt.size()) { File::OFStream out (opt[0][0], std::ios_base::out | std::ios_base::trunc); + out << "# " << App::command_history_string << "\n"; if (!std::isfinite (step_size)) step_size = 1.0f; if (weights_provided) { diff --git a/cmd/tcktransform.cpp b/cmd/tcktransform.cpp index 0611165824..a957d0e1c6 100644 --- a/cmd/tcktransform.cpp +++ b/cmd/tcktransform.cpp @@ -1,23 +1,24 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
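
The tckstats changes above compute a weighted median (stepping down from the total weight until no more than half remains) and a weighted standard deviation with an (N-1)/N correction. A minimal sketch of those two computations; the length/weight pairs are made-up values for illustration only:

// weighted mean, median and standard deviation of streamline lengths
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

int main ()
{
  // hypothetical (length in mm, weight) pairs for four streamlines
  std::vector<std::pair<double, double>> lengths = {
    { 20.0, 0.5 }, { 35.0, 2.0 }, { 40.0, 1.0 }, { 80.0, 0.25 } };

  double sum_weights = 0.0, weighted_sum = 0.0;
  for (const auto& lw : lengths) {
    sum_weights += lw.second;
    weighted_sum += lw.second * lw.first;
  }
  const double mean = weighted_sum / sum_weights;

  // weighted median: sort by length, then strip weight from the cumulative
  // total until no more than half of the total weight remains
  std::sort (lengths.begin(), lengths.end());
  std::size_t median_index = 0;
  double sum = sum_weights - lengths[0].second;
  while (sum > 0.5 * sum_weights)
    sum -= lengths[++median_index].second;
  const double median = lengths[median_index].first;

  // weighted standard deviation with the (N-1)/N correction
  const std::size_t count = lengths.size();
  double ssd = 0.0;
  for (const auto& lw : lengths)
    ssd += lw.second * (lw.first - mean) * (lw.first - mean);
  const double stdev = std::sqrt (ssd / (((count - 1) / double (count)) * sum_weights));

  std::cout << "mean: " << mean << " mm, median: " << median
            << " mm, stdev: " << stdev << " mm\n";
}
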
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" -#include "progressbar.h" #include "image.h" +#include "ordered_thread_queue.h" +#include "progressbar.h" #include "interp/linear.h" -#include "thread_queue.h" #include "dwi/tractography/file.h" #include "dwi/tractography/properties.h" @@ -67,7 +68,7 @@ class Warper { MEMALIGN(Warper) bool operator () (const TrackType& in, TrackType& out) { out.clear(); - out.index = in.index; + out.set_index (in.get_index()); out.weight = in.weight; for (size_t n = 0; n < in.size(); ++n) { auto vertex = pos(in[n]); @@ -126,7 +127,7 @@ void run () Writer writer (argument[2], loader.properties); - Thread::run_queue ( + Thread::run_ordered_queue ( loader, Thread::batch (TrackType(), 1024), Thread::multi (warper), diff --git a/cmd/tensor2metric.cpp b/cmd/tensor2metric.cpp index 26b36a2755..8388a33a1f 100644 --- a/cmd/tensor2metric.cpp +++ b/cmd/tensor2metric.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "image.h" @@ -29,52 +30,52 @@ const char* modulate_choices[] = { "none", "fa", "eigval", NULL }; void usage () { - ARGUMENTS + ARGUMENTS + Argument ("tensor", "the input tensor image.").type_image_in (); - OPTIONS + OPTIONS + Option ("adc", "compute the mean apparent diffusion coefficient (ADC) of the diffusion tensor. " "(sometimes also referred to as the mean diffusivity (MD))") + Argument ("image").type_image_out() - + + Option ("fa", "compute the fractional anisotropy (FA) of the diffusion tensor.") + Argument ("image").type_image_out() - + + Option ("ad", "compute the axial diffusivity (AD) of the diffusion tensor. " "(equivalent to the principal eigenvalue)") + Argument ("image").type_image_out() - + + Option ("rd", "compute the radial diffusivity (RD) of the diffusion tensor. " "(equivalent to the mean of the two non-principal eigenvalues)") + Argument ("image").type_image_out() - + + Option ("cl", "compute the linearity metric of the diffusion tensor. 
" "(one of the three Westin shape metrics)") + Argument ("image").type_image_out() - + + Option ("cp", "compute the planarity metric of the diffusion tensor. " "(one of the three Westin shape metrics)") + Argument ("image").type_image_out() - + + Option ("cs", "compute the sphericity metric of the diffusion tensor. " "(one of the three Westin shape metrics)") + Argument ("image").type_image_out() - + + Option ("value", "compute the selected eigenvalue(s) of the diffusion tensor.") + Argument ("image").type_image_out() - + + Option ("vector", "compute the selected eigenvector(s) of the diffusion tensor.") + Argument ("image").type_image_out() - + + Option ("num", "specify the desired eigenvalue/eigenvector(s). Note that several eigenvalues " "can be specified as a number sequence. For example, '1,3' specifies the " @@ -85,16 +86,16 @@ void usage () "specify how to modulate the magnitude of the eigenvectors. Valid choices " "are: none, FA, eigval (default = FA).") + Argument ("choice").type_choice (modulate_choices) - + + Option ("mask", "only perform computation within the specified binary brain mask image.") + Argument ("image").type_image_in(); - + AUTHOR = "Thijs Dhollander (thijs.dhollander@gmail.com) & Ben Jeurissen (ben.jeurissen@uantwerpen.be) & J-Donald Tournier (jdtournier@gmail.com)"; SYNOPSIS = "Generate maps of tensor-derived parameters"; - - REFERENCES + + REFERENCES + "Basser, P. J.; Mattiello, J. & Lebihan, D. " "MR diffusion tensor spectroscopy and imaging. " "Biophysical Journal, 1994, 66, 259-267" @@ -105,17 +106,17 @@ void usage () class Processor { MEMALIGN(Processor) public: - Processor (Image& mask_img, - Image& adc_img, - Image& fa_img, - Image& ad_img, - Image& rd_img, - Image& cl_img, - Image& cp_img, - Image& cs_img, - Image& value_img, - Image& vector_img, - vector& vals, + Processor (Image& mask_img, + Image& adc_img, + Image& fa_img, + Image& ad_img, + Image& rd_img, + Image& cl_img, + Image& cp_img, + Image& cs_img, + Image& value_img, + Image& vector_img, + vector& vals, int modulate) : mask_img (mask_img), adc_img (adc_img), @@ -135,36 +136,36 @@ class Processor { MEMALIGN(Processor) void operator() (Image& dt_img) { - /* check mask */ + /* check mask */ if (mask_img.valid()) { assign_pos_of (dt_img, 0, 3).to (mask_img); if (!mask_img.value()) return; } - + /* input dt */ Eigen::Matrix dt; for (auto l = Loop (3) (dt_img); l; ++l) dt[dt_img.index(3)] = dt_img.value(); - + /* output adc */ if (adc_img.valid()) { assign_pos_of (dt_img, 0, 3).to (adc_img); adc_img.value() = DWI::tensor2ADC(dt); } - + double fa = 0.0; if (fa_img.valid() || (vector_img.valid() && (modulate == 1))) fa = DWI::tensor2FA(dt); - + /* output fa */ if (fa_img.valid()) { assign_pos_of (dt_img, 0, 3).to (fa_img); fa_img.value() = fa; } - + bool need_eigenvalues = value_img.valid() || vector_img.valid() || ad_img.valid() || rd_img.valid() || cl_img.valid() || cp_img.valid() || cs_img.valid(); - + Eigen::SelfAdjointEigenSolver es; if (need_eigenvalues || vector_img.valid()) { Eigen::Matrix3d M; @@ -176,16 +177,16 @@ class Processor { MEMALIGN(Processor) M (1,2) = M (2,1) = dt[5]; es = Eigen::SelfAdjointEigenSolver(M, vector_img.valid() ? 
Eigen::ComputeEigenvectors : Eigen::EigenvaluesOnly); } - + Eigen::Vector3d eigval; ssize_t ith_eig[3] = { 2, 1, 0 }; if (need_eigenvalues) { eigval = es.eigenvalues(); ith_eig[0] = 0; ith_eig[1] = 1; ith_eig[2] = 2; - std::sort (std::begin (ith_eig), std::end (ith_eig), + std::sort (std::begin (ith_eig), std::end (ith_eig), [&eigval](size_t a, size_t b) { return abs(eigval[a]) > abs(eigval[b]); }); } - + /* output value */ if (value_img.valid()) { assign_pos_of (dt_img, 0, 3).to (value_img); @@ -198,19 +199,19 @@ class Processor { MEMALIGN(Processor) value_img.value() = eigval(ith_eig[vals[0]]); } } - + /* output ad */ if (ad_img.valid()) { assign_pos_of (dt_img, 0, 3).to (ad_img); ad_img.value() = eigval(2); } - + /* output rd */ if (rd_img.valid()) { assign_pos_of (dt_img, 0, 3).to (rd_img); rd_img.value() = (eigval(1) + eigval(0)) / 2; } - + /* output shape measures */ if (cl_img.valid() || cp_img.valid() || cs_img.valid()) { double eigsum = eigval.sum(); @@ -229,7 +230,7 @@ class Processor { MEMALIGN(Processor) } } } - + /* output vector */ if (vector_img.valid()) { Eigen::Matrix3d eigvec = es.eigenvectors(); @@ -245,9 +246,9 @@ class Processor { MEMALIGN(Processor) vector_img.value() = eigvec(1,ith_eig[vals[i]])*fact; l++; vector_img.value() = eigvec(2,ith_eig[vals[i]])*fact; l++; } - } + } } - + private: Image mask_img; Image adc_img; @@ -283,7 +284,7 @@ void run () } size_t metric_count = 0; - + auto adc_img = Image(); opt = get_options ("adc"); if (opt.size()) { @@ -291,7 +292,7 @@ void run () adc_img = Image::create (opt[0][0], header); metric_count++; } - + auto fa_img = Image(); opt = get_options ("fa"); if (opt.size()) { @@ -299,7 +300,7 @@ void run () fa_img = Image::create (opt[0][0], header); metric_count++; } - + auto ad_img = Image(); opt = get_options ("ad"); if (opt.size()) { @@ -307,7 +308,7 @@ void run () ad_img = Image::create (opt[0][0], header); metric_count++; } - + auto rd_img = Image(); opt = get_options ("rd"); if (opt.size()) { @@ -315,7 +316,7 @@ void run () rd_img = Image::create (opt[0][0], header); metric_count++; } - + auto cl_img = Image(); opt = get_options ("cl"); if (opt.size()) { @@ -323,7 +324,7 @@ void run () cl_img = Image::create (opt[0][0], header); metric_count++; } - + auto cp_img = Image(); opt = get_options ("cp"); if (opt.size()) { @@ -331,7 +332,7 @@ void run () cp_img = Image::create (opt[0][0], header); metric_count++; } - + auto cs_img = Image(); opt = get_options ("cs"); if (opt.size()) { @@ -339,14 +340,14 @@ void run () cs_img = Image::create (opt[0][0], header); metric_count++; } - + vector vals = {1}; opt = get_options ("num"); if (opt.size()) { vals = opt[0][0]; if (vals.empty()) throw Exception ("invalid eigenvalue/eigenvector number specifier"); - for (size_t i = 0; i < vals.size(); ++i) + for (size_t i = 0; i < vals.size(); ++i) if (vals[i] < 1 || vals[i] > 3) throw Exception ("eigenvalue/eigenvector number is out of bounds"); } @@ -364,7 +365,7 @@ void run () value_img = Image::create (opt[0][0], header); metric_count++; } - + auto vector_img = Image(); opt = get_options ("vector"); if (opt.size()) { @@ -376,7 +377,7 @@ void run () if (!metric_count) throw Exception ("No output specified; must request at least one metric of interest using the available command-line options"); - + ThreadedLoop (std::string("computing metric") + (metric_count > 1 ? 
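
The tensor2metric option descriptions above define each metric in words: ADC/MD, FA, AD as the principal eigenvalue, RD as the mean of the two non-principal eigenvalues, and the three Westin shape measures. A minimal sketch of how such metrics can be derived from sorted eigenvalues; the eigenvalues below are made-up numbers, and the Westin measures are shown with sum-of-eigenvalues normalisation, which is one common convention and not necessarily the exact definition used by the command:

// scalar metrics from the sorted eigenvalues of a diffusion tensor
#include <cmath>
#include <iostream>

int main ()
{
  // hypothetical eigenvalues, sorted l1 >= l2 >= l3 (units of mm^2/s)
  const double l1 = 1.7e-3, l2 = 0.4e-3, l3 = 0.3e-3;

  const double md = (l1 + l2 + l3) / 3.0;      // mean diffusivity (ADC)
  const double ad = l1;                        // axial diffusivity: principal eigenvalue
  const double rd = 0.5 * (l2 + l3);           // radial diffusivity: mean of the other two

  // fractional anisotropy
  const double fa = std::sqrt (1.5 * ((l1-md)*(l1-md) + (l2-md)*(l2-md) + (l3-md)*(l3-md))
                               / (l1*l1 + l2*l2 + l3*l3));

  // Westin shape measures, normalised here by the eigenvalue sum so that cl + cp + cs = 1
  const double sum = l1 + l2 + l3;
  const double cl = (l1 - l2) / sum;           // linearity
  const double cp = 2.0 * (l2 - l3) / sum;     // planarity
  const double cs = 3.0 * l3 / sum;            // sphericity

  std::cout << "MD " << md << "  AD " << ad << "  RD " << rd << "  FA " << fa << "\n"
            << "cl " << cl << "  cp " << cp << "  cs " << cs << "\n";
}
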
"s" : ""), dt_img, 0, 3) .run (Processor (mask_img, adc_img, fa_img, ad_img, rd_img, cl_img, cp_img, cs_img, value_img, vector_img, vals, modulate), dt_img); } diff --git a/cmd/transformcalc.cpp b/cmd/transformcalc.cpp index ae1288acf7..917efd1a41 100644 --- a/cmd/transformcalc.cpp +++ b/cmd/transformcalc.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include #include @@ -37,6 +38,7 @@ const char* operations[] = { "interpolate", "decompose", "align_vertices_rigid", + "align_vertices_rigid_scale", NULL }; @@ -47,49 +49,55 @@ void usage () SYNOPSIS = "Perform calculations on linear transformation matrices"; ARGUMENTS - + Argument ("inputs", "the inputs for the specified operation").allow_multiple() + + Argument ("inputs", "the input(s) for the specified operation").allow_multiple() + Argument ("operation", "the operation to perform, one of: " + join(operations, ", ") + " (see description section for details).").type_choice (operations) + Argument ("output", "the output transformation matrix.").type_file_out (); - DESCRIPTION - + "invert: invert the input transformation:" - + "matrix_in invert output" - - + "half: calculate the matrix square root of the input transformation:" - + "matrix_in half output" - - + "rigid: calculate the rigid transformation of the affine input transformation:" - + "matrix_in rigid output" - - + "header: calculate the transformation matrix from an original image and an image with modified header:" - + "mov mapmovhdr header output" - - + "average: calculate the average affine matrix of all input matrices:" - + "input ... average output" - - + "interpolate: create interpolated transformation matrix between input (t=0) and input2 (t=1). " - "Based on matrix decomposition with linear interpolation of " - "translation, rotation and stretch described in " - "Shoemake, K., Hill, M., & Duff, T. (1992). Matrix Animation and Polar Decomposition. " - "Matrix, 92, 258-264. doi:10.1.1.56.1336:" - + "input input2 interpolate output" - - + "decompose: decompose transformation matrix M into translation, rotation and stretch and shear (M = T * R * S). 
" - "The output is a key-value text file containing: " - "scaling: vector of 3 scaling factors in x, y, z direction; " - "shear: list of shear factors for xy, xz, yz axes; " - "angles: list of Euler angles about static x, y, z axes in radians in the range [0:pi]x[-pi:pi]x[-pi:pi]; " - "angle_axis: angle in radians and rotation axis; " - "translation : translation vector along x, y, z axes in mm; " - "R: composed roation matrix (R = rot_x * rot_y * rot_z); " - "S: composed scaling and shear matrix:" - + "matrix_in decompose output" - - + "align_vertices_rigid: align two sets of landmarks using a rigid transformation. " - "Vertex coordinates are in scanner space, corresponding vertices must be stored in the same row " - "of moving.txt and fixed.txt. Requires 3 or more vertices in each file. " - "Algorithm: Kabsch 'A solution for the best rotation to relate two sets of vectors' DOI:10.1107/S0567739476001873:" - + "input moving.txt fixed.txt align_vertices_rigid output"; + EXAMPLES + + Example ("Invert a transformation", + "transformcalc matrix_in.txt invert matrix_out.txt", + "") + + + Example ("Calculate the matrix square root of the input transformation (halfway transformation)", + "transformcalc matrix_in.txt half matrix_out.txt", + "") + + + Example ("Calculate the rigid component of an affine input transformation", + "transformcalc affine_in.txt rigid rigid_out.txt", + "") + + + Example ("Calculate the transformation matrix from an original image and an image with modified header", + "transformcalc mov mapmovhdr header output", + "") + + + Example ("Calculate the average affine matrix of a set of input matrices", + "transformcalc input1.txt ... inputN.txt average matrix_out.txt", + "") + + + Example ("Create interpolated transformation matrix between two inputs", + "transformcalc input1.txt input2.txt interpolate matrix_out.txt", + "Based on matrix decomposition with linear interpolation of " + "translation, rotation and stretch described in: " + "Shoemake, K., Hill, M., & Duff, T. (1992). Matrix Animation and Polar Decomposition. " + "Matrix, 92, 258-264. doi:10.1.1.56.1336") + + + Example ("Decompose transformation matrix M into translation, rotation and stretch and shear (M = T * R * S)", + "transformcalc matrix_in.txt decompose matrixes_out.txt", + "The output is a key-value text file containing: " + "scaling: vector of 3 scaling factors in x, y, z direction; " + "shear: list of shear factors for xy, xz, yz axes; " + "angles: list of Euler angles about static x, y, z axes in radians in the range [0:pi]x[-pi:pi]x[-pi:pi]; " + "angle_axis: angle in radians and rotation axis; " + "translation : translation vector along x, y, z axes in mm; " + "R: composed roation matrix (R = rot_x * rot_y * rot_z); " + "S: composed scaling and shear matrix") + + + Example ("Calculate transformation that aligns two images based on sets of corresponding landmarks", + "transformcalc input moving.txt fixed.txt align_vertices_rigid rigid.txt", + "Similary, 'align_vertices_rigid_scale' produces an affine matrix (rigid and global scale). " + "Vertex coordinates are in scanner space, corresponding vertices must be stored in the same row " + "of moving.txt and fixed.txt. Requires 3 or more vertices in each file. 
" + "Algorithm: Kabsch 'A solution for the best rotation to relate two sets of vectors' DOI:10.1107/S0567739476001873"); } @@ -97,27 +105,28 @@ template int sgn(T val) { return (T(0) < val) - (val < T(0)); } -transform_type align_corresponding_vertices (const Eigen::MatrixXd &target_vertices, const Eigen::MatrixXd &moving_vertices, bool scale) { +transform_type align_corresponding_vertices (const Eigen::MatrixXd &src_vertices, const Eigen::MatrixXd &trg_vertices, bool scale) { // this function aligns two sets of vertices which must have corresponding vertices stored in the same row // // scale == false --> Kabsch - // minimise (target_vertices.row(i) - M * moving_vertices.row(i) + t).squaredNorm(); + // minimise (src_vertices.row(i) - M * trg_vertices.row(i) + t).squaredNorm(); // - // scale == true --> Umeyama + // scale == true // nonrigid version of Kabsch algorithm that also includes scale (not shear) // - assert(target_vertices.rows() == moving_vertices.rows()); - const size_t n = moving_vertices.rows(); - assert (n > 2); + assert(src_vertices.rows() == trg_vertices.rows()); + const size_t n = trg_vertices.rows(); + if (n < 3) + throw Exception ("vertex alignment requires at least 3 points"); - assert(target_vertices.cols() == moving_vertices.cols()); - assert(target_vertices.cols() == 3 && "align_corresponding_vertices implemented only for 3D data"); + assert(src_vertices.cols() == trg_vertices.cols()); + assert(src_vertices.cols() == 3 && "align_corresponding_vertices implemented only for 3D data"); - Eigen::VectorXd moving_centre = moving_vertices.colwise().mean(); - Eigen::VectorXd target_centre = target_vertices.colwise().mean(); - Eigen::MatrixXd moving_centered = moving_vertices.rowwise() - moving_centre.transpose(); - Eigen::MatrixXd target_centered = target_vertices.rowwise() - target_centre.transpose(); - Eigen::MatrixXd cov = (target_centered.adjoint() * moving_centered) / default_type (n - 1); + Eigen::VectorXd trg_centre = trg_vertices.colwise().mean(); + Eigen::VectorXd src_centre = src_vertices.colwise().mean(); + Eigen::MatrixXd trg_centred = trg_vertices.rowwise() - trg_centre.transpose(); + Eigen::MatrixXd src_centred = src_vertices.rowwise() - src_centre.transpose(); + Eigen::MatrixXd cov = (src_centred.adjoint() * trg_centred) / default_type (n - 1); Eigen::JacobiSVD svd (cov, Eigen::ComputeFullU | Eigen::ComputeFullV); @@ -136,18 +145,25 @@ transform_type align_corresponding_vertices (const Eigen::MatrixXd &target_verti R = Eigen::Quaterniond(R).normalized().toRotationMatrix(); if (scale) { - default_type fsq = 0; - for (size_t i = 0; i < n; ++ i) - fsq += moving_centered.row(i).squaredNorm(); + default_type fsq_t = 0.0; + default_type fsq_s = 0.0; + for (size_t i = 0; i < n; ++ i) { + fsq_t += trg_centred.row(i).squaredNorm(); + fsq_s += src_centred.row(i).squaredNorm(); + } // calculate and apply the scale - default_type fscale = svd.singularValues().dot(e) / fsq; + default_type fscale = sqrt(fsq_t / fsq_s); // Umeyama: svd.singularValues().dot(e) / fsq; + DEBUG("scaling: "+str(fscale)); R *= fscale; } - transform_type T; - T.linear() = R; - T.translation() = target_centre - (R * moving_centre); - return T; + transform_type T1 = transform_type::Identity(); + transform_type T2 = transform_type::Identity(); + transform_type T3 = transform_type::Identity(); + T1.translation() = trg_centre; + T2.linear() = R; + T3.translation() = -src_centre; + return T1 * T2 * T3; } void run () @@ -276,6 +292,7 @@ void run () File::OFStream out (output_path); + out << "# " << 
App::command_history_string << "\n"; Eigen::IOFormat fmt(Eigen::FullPrecision, Eigen::DontAlignCols, " ", "\n", "", "", "", "\n"); out << "scaling: " << Eigen::RowVector3d(S(0,0), S(1,1), S(2,2)).format(fmt); out << "shear: " << Eigen::RowVector3d(S(0,1), S(0,2), S(1,2)).format(fmt); @@ -288,15 +305,16 @@ void run () out << "S: " << S.row(0).format(fmt); out << "S: " << S.row(1).format(fmt); out << "S: " << S.row(2).format(fmt); + out << "jacobian_det: " << str(transform.linear().topLeftCorner<3,3>().determinant()) << "\n"; break; } - case 7: { // align_vertices_rigid + case 7: case 8: { // align_vertices_rigid and align_vertices_rigid_scale if (num_inputs != 2) - throw Exception ("align_vertices_rigid requires 2 input"); + throw Exception ("align_vertices_rigid requires 2 inputs"); const Eigen::MatrixXd target_vertices = load_matrix (argument[0]); const Eigen::MatrixXd moving_vertices = load_matrix (argument[1]); - const transform_type T = align_corresponding_vertices (target_vertices, moving_vertices, false); + const transform_type T = align_corresponding_vertices (moving_vertices, target_vertices, op==8); save_transform (T, output_path); break; } diff --git a/cmd/transformcompose.cpp b/cmd/transformcompose.cpp index 66e3224d1c..82434bc181 100644 --- a/cmd/transformcompose.cpp +++ b/cmd/transformcompose.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "algo/threaded_loop.h" @@ -67,21 +68,25 @@ void usage () SYNOPSIS = "Compose any number of linear transformations and/or warps into a single transformation"; DESCRIPTION - + "The input linear transforms must be supplied in as a 4x4 matrix in a text file (as per the output of mrregister)." - "The input warp fields must be supplied as a 4D image representing a deformation field (as output from mrrregister -nl_warp)."; + + "Any linear transforms must be supplied as a 4x4 matrix in a text file (e.g. as per the output of mrregister). " + "Any warp fields must be supplied as a 4D image representing a deformation field (e.g. as output from mrrregister -nl_warp)." - ARGUMENTS - + Argument ("input", "the input transforms (either linear or non-linear warps). List transforms in the order you like them to be " - "applied to an image (as if you were applying them seperately with mrtransform).").type_file_in().allow_multiple() + + "Input transformations should be provided to the command in the order in which they would be applied " + "to an image if they were to be applied individually." 
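
The transformcalc vertex-alignment changes above describe aligning two sets of corresponding landmarks via the Kabsch approach, with 'align_vertices_rigid_scale' additionally estimating a single global scale. A minimal self-contained Eigen sketch of that technique; it is an illustration under those assumptions rather than the transformcalc implementation itself, and the landmark coordinates in main() are made up:

#include <cmath>
#include <iostream>
#include <Eigen/Dense>

// Align src (moving) to trg (fixed); both are N x 3 matrices of corresponding
// vertices, one vertex per row. If scale is true, a single global scale factor
// is estimated in addition to the rotation and translation.
Eigen::Affine3d align_vertices (const Eigen::MatrixXd& src,
                                const Eigen::MatrixXd& trg,
                                const bool scale)
{
  const Eigen::RowVector3d src_centre = src.colwise().mean();
  const Eigen::RowVector3d trg_centre = trg.colwise().mean();
  const Eigen::MatrixXd src_centred = src.rowwise() - src_centre;
  const Eigen::MatrixXd trg_centred = trg.rowwise() - trg_centre;

  // cross-covariance of the centred vertex sets
  const Eigen::Matrix3d cov = src_centred.transpose() * trg_centred;
  const Eigen::JacobiSVD<Eigen::Matrix3d> svd (cov, Eigen::ComputeFullU | Eigen::ComputeFullV);

  // guard against reflections so that det(R) = +1
  Eigen::Matrix3d D = Eigen::Matrix3d::Identity();
  if ((svd.matrixV() * svd.matrixU().transpose()).determinant() < 0.0)
    D (2,2) = -1.0;
  Eigen::Matrix3d R = svd.matrixV() * D * svd.matrixU().transpose();

  if (scale)   // global scale as the ratio of RMS spreads about the centroids
    R *= std::sqrt (trg_centred.squaredNorm() / src_centred.squaredNorm());

  Eigen::Affine3d T = Eigen::Affine3d::Identity();
  T.linear() = R;
  T.translation() = trg_centre.transpose() - R * src_centre.transpose();
  return T;
}

int main ()
{
  // made-up landmarks: trg is src rotated 90 degrees about z, scaled by 2, shifted by (1,1,1)
  Eigen::MatrixXd src (4, 3), trg (4, 3);
  src << 0,0,0,   1,0,0,   0,1,0,  0,0,1;
  trg << 1,1,1,   1,3,1,  -1,1,1,  1,1,3;
  std::cout << align_vertices (src, trg, true).matrix() << "\n";
}
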
- + Argument ("output", "the output file. If all input transformations are linear, then the output will also be a linear " - "transformation saved as a 4x4 matrix in a text file. If a template image is supplied, then the output will " - "always be a deformation field (see below). If all inputs are warps, or a mix of linear and warps, then the " - "output will be a deformation field defined on the grid of the last input warp supplied.").type_file_out (); + + "If all input transformations are linear, and the -template option is not provided, then the file output by " + "the command will also be a linear transformation saved as a 4x4 matrix in a text file. If a template image is " + "supplied, then the output will always be a deformation field. If at least one of the inputs is a warp field, " + "then the output will be a deformation field, which will be defined on the grid of the last input warp image " + "supplied if the -template option is not used."; + + ARGUMENTS + + Argument ("input", "the input transforms (either linear or non-linear warps).").type_file_in().allow_multiple() + + Argument ("output", "the output file (may be a linear transformation text file, or a deformation warp field image, depending on usage)").type_various(); OPTIONS - + Option ("template", "define the output grid defined by a template image") - + Argument ("image").type_image_in(); + + Option ("template", "define the output grid defined by a template image") + + Argument ("image").type_image_in(); } diff --git a/cmd/transformconvert.cpp b/cmd/transformconvert.cpp index fdf1a33ef3..f332d5f2a5 100644 --- a/cmd/transformconvert.cpp +++ b/cmd/transformconvert.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include #include "command.h" @@ -38,22 +39,24 @@ void usage () SYNOPSIS = "Convert linear transformation matrices"; DESCRIPTION - + "This command allows to convert the transformation matrix provided by FSL's flirt command " - "and ITK's linear transformation format to a format usable in MRtrix."; + + "This command allows to convert transformation matrices provided by other registration " + "softwares to a format usable in MRtrix3. Example usages are provided below."; - ARGUMENTS - + Argument ("input", "the input for the specified operation").allow_multiple() - + Argument ("operation", "the operation to perform, one of:\n" + join(operations, ", ") + "." - + "\n\nflirt_import: " - + "Convert a transformation matrix produced by FSL's flirt command into a format usable by MRtrix. 
" - "You'll need to provide as additional arguments the NIfTI images that were passed to flirt " - "with the -in and -ref options:\nmatrix_in in ref flirt_import output" - - + "\n\nitk_import: " - + "Convert a plain text transformation matrix file produced by ITK's (ANTS, Slicer) affine registration " - + "into a format usable by MRtrix." - ).type_choice (operations) + EXAMPLES + + Example ("Convert a transformation matrix produced by FSL's flirt command into a format usable by MRtrix3", + "transformconvert transform_flirt.mat flirt_in.nii flirt_ref.nii flirt_import transform_mrtrix.txt", + "The two images provided as inputs for this operation must be in the correct order: first the image " + "that was provided to flirt via the -in option, second the image that was provided to flirt via the " + "-ref option.") + + Example ("Convert a plain text transformation matrix file produced by ITK's affine registration " + "(e.g. ANTS, Slicer) into a format usable by MRtrix3", + "transformconvert transform_itk.txt itk_import transform_mrtrix.txt", + ""); + + ARGUMENTS + + Argument ("input", "the input(s) for the specified operation").allow_multiple() + + Argument ("operation", "the operation to perform, one of:\n" + join(operations, ", ")).type_choice (operations) + Argument ("output", "the output transformation matrix.").type_file_out (); } @@ -137,7 +140,7 @@ void parse_itk_trafo (const std::string& itk_file, TransformationType& transform // QuaternionRigidTransform_double_3_3? // QuaternionRigidTransform_float_3_3? - File::KeyValue file (itk_file, first_line.c_str()); + File::KeyValue::Reader file (itk_file, first_line.c_str()); std::string line; size_t invalid (2); while (file.next()) { diff --git a/cmd/tsfdivide.cpp b/cmd/tsfdivide.cpp index 48879af593..9eb276821b 100644 --- a/cmd/tsfdivide.cpp +++ b/cmd/tsfdivide.cpp @@ -1,21 +1,23 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "command.h" #include "dwi/tractography/properties.h" #include "dwi/tractography/scalar_file.h" +#include "dwi/tractography/streamline.h" using namespace MR; @@ -28,8 +30,8 @@ void usage () SYNOPSIS = "Divide corresponding values in track scalar files"; ARGUMENTS - + Argument ("input", "the input track scalar file.").type_file_in() - + Argument ("input", "the input track scalar file.").type_file_in() + + Argument ("input1", "the first input track scalar file.").type_file_in() + + Argument ("input2", "the second input track scalar file.").type_file_in() + Argument ("output", "the output track scalar file").type_file_out(); } @@ -38,30 +40,37 @@ using value_type = float; void run () { - DWI::Tractography::Properties properties1; + DWI::Tractography::Properties properties1, properties2; DWI::Tractography::ScalarReader reader1 (argument[0], properties1); - DWI::Tractography::Properties properties2; DWI::Tractography::ScalarReader reader2 (argument[1], properties2); - DWI::Tractography::ScalarWriter writer (argument[2], properties1); - DWI::Tractography::check_properties_match (properties1, properties2, "scalar", false); - vector tck_scalar1; - vector tck_scalar2; + DWI::Tractography::ScalarWriter writer (argument[2], properties1); + DWI::Tractography::TrackScalar tck_scalar1, tck_scalar2, tck_scalar_output; while (reader1 (tck_scalar1)) { - if (!reader2 (tck_scalar2)) + if (!reader2 (tck_scalar2)) { + WARN ("No more track scalars left in input file \"" + std::string(argument[1]) + + "\" after " + str(tck_scalar1.get_index()+1) + " streamlines; " + + "but more data are present in input file \"" + std::string(argument[0]) + "\""); break; + } if (tck_scalar1.size() != tck_scalar2.size()) - throw Exception ("track scalar length mismatch"); + throw Exception ("track scalar length mismatch at streamline index " + str(tck_scalar1.get_index())); - vector tck_scalar_output (tck_scalar1.size()); + tck_scalar_output.set_index (tck_scalar1.get_index()); + tck_scalar_output.resize (tck_scalar1.size()); for (size_t i = 0; i < tck_scalar1.size(); ++i) { - if (tck_scalar2[i] == 0.0) - tck_scalar_output[i] = 0; + if (tck_scalar2[i] == value_type(0)) + tck_scalar_output[i] = value_type(0); else tck_scalar_output[i] = tck_scalar1[i] / tck_scalar2[i]; } writer (tck_scalar_output); } + if (reader2 (tck_scalar2)) { + WARN ("No more track scalars left in input file \"" + std::string(argument[0]) + + "\" after " + str(tck_scalar1.get_index()+1) + " streamlines; " + + "but more data are present in input file \"" + std::string(argument[1]) + "\""); + } } diff --git a/cmd/tsfinfo.cpp b/cmd/tsfinfo.cpp index 8dea149f81..13a32e4e0f 100644 --- a/cmd/tsfinfo.cpp +++ b/cmd/tsfinfo.cpp @@ -1,23 +1,25 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "file/ofstream.h" -#include "dwi/tractography/scalar_file.h" #include "dwi/tractography/properties.h" +#include "dwi/tractography/scalar_file.h" +#include "dwi/tractography/streamline.h" using namespace MR; using namespace MR::DWI; @@ -72,13 +74,13 @@ void run () std::cout << (i == properties.comments.begin() ? "" : " ") << *i << "\n"; } - for (std::multimap::const_iterator i = properties.roi.begin(); i != properties.roi.end(); ++i) + for (std::multimap::const_iterator i = properties.prior_rois.begin(); i != properties.prior_rois.end(); ++i) std::cout << " ROI: " << i->first << " " << i->second << "\n"; if (actual_count) { - vector tck; + DWI::Tractography::TrackScalar<> tck; size_t count = 0; { ProgressBar progress ("counting tracks in file"); @@ -92,12 +94,11 @@ void run () if (opt.size()) { ProgressBar progress ("writing track scalar data to ascii files"); - vector tck; - size_t count = 0; + DWI::Tractography::TrackScalar<> tck; while (file (tck)) { std::string filename (opt[0][0]); filename += "-000000.txt"; - std::string num (str (count)); + std::string num (str (tck.get_index())); filename.replace (filename.size()-4-num.size(), num.size(), num); File::OFStream out (filename); @@ -105,7 +106,6 @@ void run () out << (*i) << "\n"; out.close(); - count++; ++progress; } } diff --git a/cmd/tsfmult.cpp b/cmd/tsfmult.cpp index 90d8ac6882..236c31b5db 100644 --- a/cmd/tsfmult.cpp +++ b/cmd/tsfmult.cpp @@ -1,21 +1,23 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "command.h" #include "dwi/tractography/properties.h" #include "dwi/tractography/scalar_file.h" +#include "dwi/tractography/streamline.h" using namespace MR; @@ -28,8 +30,8 @@ void usage () SYNOPSIS = "Multiply corresponding values in track scalar files"; ARGUMENTS - + Argument ("input", "the input track scalar file.").type_file_in() - + Argument ("input", "the input track scalar file.").type_file_in() + + Argument ("input1", "the first input track scalar file.").type_file_in() + + Argument ("input2", "the second input track scalar file.").type_file_in() + Argument ("output", "the output track scalar file").type_file_out(); } @@ -38,27 +40,34 @@ using value_type = float; void run () { - DWI::Tractography::Properties properties1; + DWI::Tractography::Properties properties1, properties2; DWI::Tractography::ScalarReader reader1 (argument[0], properties1); - DWI::Tractography::Properties properties2; DWI::Tractography::ScalarReader reader2 (argument[1], properties2); - DWI::Tractography::ScalarWriter writer (argument[2], properties1); - DWI::Tractography::check_properties_match (properties1, properties2, "scalar", false); - vector tck_scalar1; - vector tck_scalar2; + DWI::Tractography::ScalarWriter writer (argument[2], properties1); + DWI::Tractography::TrackScalar<> tck_scalar1, tck_scalar2, tck_scalar_output; while (reader1 (tck_scalar1)) { - if (!reader2 (tck_scalar2)) + if (!reader2 (tck_scalar2)) { + WARN ("No more track scalars left in input file \"" + std::string(argument[1]) + + "\" after " + str(tck_scalar1.get_index()+1) + " streamlines; " + + "but more data are present in input file \"" + std::string(argument[0]) + "\""); break; + } if (tck_scalar1.size() != tck_scalar2.size()) - throw Exception ("track scalar length mismatch"); + throw Exception ("track scalar length mismatch at streamline index " + str(tck_scalar1.get_index())); - vector tck_scalar_output (tck_scalar1.size()); + tck_scalar_output.set_index (tck_scalar1.get_index()); + tck_scalar_output.resize (tck_scalar1.size()); for (size_t i = 0; i < tck_scalar1.size(); ++i) { tck_scalar_output[i] = tck_scalar1[i] * tck_scalar2[i]; } writer (tck_scalar_output); } + if (reader2 (tck_scalar2)) { + WARN ("No more track scalars left in input file \"" + std::string(argument[0]) + + "\" after " + str(tck_scalar1.get_index()+1) + " streamlines; " + + "but more data are present in input file \"" + std::string(argument[1]) + "\""); + } } diff --git a/cmd/tsfsmooth.cpp b/cmd/tsfsmooth.cpp index 211ed0d45a..f127cd32ef 100644 --- a/cmd/tsfsmooth.cpp +++ b/cmd/tsfsmooth.cpp @@ -1,22 +1,24 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. 
* - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "math/median.h" #include "dwi/tractography/properties.h" #include "dwi/tractography/scalar_file.h" +#include "dwi/tractography/streamline.h" #define DEFAULT_SMOOTHING 4.0 @@ -51,7 +53,7 @@ void run () DWI::Tractography::ScalarWriter writer (argument[1], properties); float stdev = get_option_value ("stdev", DEFAULT_SMOOTHING); - + vector kernel (2 * ceil(2.5 * stdev) + 1, 0); float norm_factor = 0.0; float radius = (kernel.size() - 1.0) / 2.0; @@ -62,9 +64,10 @@ void run () for (size_t c = 0; c < kernel.size(); c++) kernel[c] /= norm_factor; - vector tck_scalar; + DWI::Tractography::TrackScalar tck_scalar; while (reader (tck_scalar)) { - vector tck_scalars_smoothed (tck_scalar.size()); + DWI::Tractography::TrackScalar tck_scalars_smoothed (tck_scalar.size()); + tck_scalars_smoothed.set_index (tck_scalar.get_index()); for (int i = 0; i < (int)tck_scalar.size(); ++i) { float norm_factor = 0.0; diff --git a/cmd/tsfthreshold.cpp b/cmd/tsfthreshold.cpp index d36464bd9c..fd290f49ac 100644 --- a/cmd/tsfthreshold.cpp +++ b/cmd/tsfthreshold.cpp @@ -1,21 +1,23 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "command.h" #include "dwi/tractography/properties.h" #include "dwi/tractography/scalar_file.h" +#include "dwi/tractography/streamline.h" using namespace MR; @@ -29,7 +31,7 @@ void usage () ARGUMENTS + Argument ("input", "the input track scalar file.").type_file_in() - + Argument ("N", "the desired threshold").type_float () + + Argument ("T", "the desired threshold").type_float () + Argument ("output", "the binary output track scalar file").type_file_out(); @@ -50,20 +52,21 @@ void run () DWI::Tractography::ScalarReader reader (argument[0], properties); DWI::Tractography::ScalarWriter writer (argument[2], properties); - vector tck_scalar; + DWI::Tractography::TrackScalar tck_scalar; while (reader (tck_scalar)) { - vector tck_mask (tck_scalar.size()); + DWI::Tractography::TrackScalar tck_mask (tck_scalar.size()); + tck_mask.set_index (tck_scalar.get_index()); for (size_t i = 0; i < tck_scalar.size(); ++i) { if (invert) { if (tck_scalar[i] > threshold) - tck_mask[i] = 0.0; + tck_mask[i] = value_type(0); else - tck_mask[i] = 1.0; + tck_mask[i] = value_type(1); } else { if (tck_scalar[i] > threshold) - tck_mask[i] = 1.0; + tck_mask[i] = value_type(1); else - tck_mask[i] = 0.0; + tck_mask[i] = value_type(0); } } writer (tck_mask); diff --git a/cmd/tsfvalidate.cpp b/cmd/tsfvalidate.cpp index 4801180e15..17a01487cd 100644 --- a/cmd/tsfvalidate.cpp +++ b/cmd/tsfvalidate.cpp @@ -1,17 +1,19 @@ -/* Copyright (c) 2008-2017 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "types.h" @@ -72,7 +74,7 @@ void run () } Streamline track; - vector scalar; + TrackScalar scalar; size_t tck_counter = 0, tsf_counter = 0, length_mismatch_count = 0; { diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index c7ac0ca24c..1a9dcd6540 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -1,25 +1,28 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "types.h" #include "file/path.h" +#include "math/stats/fwe.h" #include "math/stats/glm.h" -#include "math/stats/permutation.h" +#include "math/stats/import.h" +#include "math/stats/shuffle.h" #include "math/stats/typedefs.h" #include "stats/permtest.h" @@ -27,6 +30,8 @@ using namespace MR; using namespace App; +using namespace MR::Math::Stats; +using namespace MR::Math::Stats::GLM; @@ -36,19 +41,30 @@ void usage () SYNOPSIS = "Statistical testing of vector data using non-parametric permutation testing"; + DESCRIPTION + + "This command can be used to perform permutation testing of any form of data. " + "The data for each input subject must be stored in a text file, with one value per row. " + "The data for each row across subjects will be tested independently, i.e. there is no " + "statistical enhancement that occurs between the data; however family-wise error control " + "will be used." + + + Math::Stats::GLM::column_ones_description; + ARGUMENTS + Argument ("input", "a text file listing the file names of the input subject data").type_file_in () - + Argument ("design", "the design matrix. Note that a column of 1's will need to be added for correlations.").type_file_in () + + Argument ("design", "the design matrix").type_file_in () - + Argument ("contrast", "the contrast vector, specified as a single row of weights").type_file_in () + + Argument ("contrast", "the contrast matrix").type_file_in () - + Argument ("output", "the filename prefix for all output.").type_text(); + + Argument ("output", "the filename prefix for all output").type_text(); OPTIONS - + Stats::PermTest::Options (false); + + Math::Stats::shuffle_options (false) + + + Math::Stats::GLM::glm_options ("element"); } @@ -56,144 +72,204 @@ void usage () using Math::Stats::matrix_type; using Math::Stats::vector_type; +using Stats::PermTest::count_matrix_type; -void run() -{ +// Define data importer class that willl obtain data for a +// specific subject based on the string path to the data file for +// that subject +// +// This is far more simple than the equivalent functionality in other +// MRtrix3 statistical inference commands, since the data are +// already in a vectorised form. 
- // Read filenames - vector filenames; - { - std::string folder = Path::dirname (argument[0]); - std::ifstream ifs (argument[0].c_str()); - std::string temp; - while (getline (ifs, temp)) { - std::string filename (Path::join (folder, temp)); - size_t p = filename.find_last_not_of(" \t"); - if (std::string::npos != p) - filename.erase(p+1); - if (filename.size()) { - if (!MR::Path::exists (filename)) - throw Exception ("Input data vector file not found: \"" + filename + "\""); - filenames.push_back (filename); - } +class SubjectVectorImport : public SubjectDataImportBase +{ MEMALIGN(SubjectVectorImport) + public: + SubjectVectorImport (const std::string& path) : + SubjectDataImportBase (path), + data (load_vector (path)) { } + + void operator() (matrix_type::RowXpr row) const override + { + assert (size_t(row.size()) == size()); + row = data; } - } - const vector_type example_data = load_vector (filenames.front()); - const size_t num_elements = example_data.size(); + default_type operator[] (const size_t index) const override + { + assert (index < size()); + return data[index]; + } - size_t num_perms = get_option_value ("nperms", DEFAULT_NUMBER_PERMUTATIONS); + size_t size() const override { return data.size(); } - // Load design matrix - const matrix_type design = load_matrix (argument[2]); - if (size_t(design.rows()) != filenames.size()) - throw Exception ("number of subjects does not match number of rows in design matrix"); - - // Load permutations file if supplied - auto opt = get_options("permutations"); - vector > permutations; - if (opt.size()) { - permutations = Math::Stats::Permutation::load_permutations_file (opt[0][0]); - num_perms = permutations.size(); - if (permutations[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); - } + private: + const vector_type data; - // Load contrast matrix - matrix_type contrast = load_matrix (argument[3]); - if (contrast.cols() > design.cols()) - throw Exception ("too many contrasts for design matrix"); - contrast.conservativeResize (contrast.rows(), design.cols()); +}; - const std::string output_prefix = argument[4]; - // Load input data - matrix_type data (num_elements, filenames.size()); - { - ProgressBar progress ("Loading input vector data", filenames.size()); - for (size_t subject = 0; subject < filenames.size(); subject++) { - - const std::string& path (filenames[subject]); - vector_type subject_data; - try { - subject_data = load_vector (path); - } catch (Exception& e) { - throw Exception (e, "Error loading vector data for subject #" + str(subject) + " (file \"" + path + "\""); - } - if (size_t(subject_data.size()) != num_elements) - throw Exception ("Vector data for subject #" + str(subject) + " (file \"" + path + "\") is wrong length (" + str(subject_data.size()) + " , expected " + str(num_elements) + ")"); +void run() +{ - data.col(subject) = subject_data; + CohortDataImport importer; + importer.initialise (argument[0]); + const size_t num_inputs = importer.size(); + CONSOLE ("Number of subjects: " + str(num_inputs)); + const size_t num_elements = importer[0]->size(); + CONSOLE ("Number of elements: " + str(num_elements)); + for (size_t i = 0; i != importer.size(); ++i) { + if (importer[i]->size() != num_elements) + throw Exception ("Subject file \"" + importer[i]->name() + "\" contains incorrect number of elements (" + str(importer[i]) + "; expected " + str(num_elements) + ")"); + } - ++progress; + // Load design 
matrix + const matrix_type design = load_matrix (argument[1]); + if (size_t(design.rows()) != num_inputs) + throw Exception ("Number of subjects (" + str(num_inputs) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); + + // Before validating the contrast matrix, we first need to see if there are any + // additional design matrix columns coming from element-wise subject data + vector extra_columns; + bool nans_in_columns = false; + auto opt = get_options ("column"); + for (size_t i = 0; i != opt.size(); ++i) { + extra_columns.push_back (CohortDataImport()); + extra_columns[i].initialise (opt[i][0]); + if (!extra_columns[i].allFinite()) + nans_in_columns = true; + } + const ssize_t num_factors = design.cols() + extra_columns.size(); + CONSOLE ("Number of factors: " + str(num_factors)); + if (extra_columns.size()) { + CONSOLE ("Number of element-wise design matrix columns: " + str(extra_columns.size())); + if (nans_in_columns) + CONSOLE ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from voxel-wise design matrices accordingly"); + } + check_design (design, extra_columns.size()); + + // Load variance groups + auto variance_groups = GLM::load_variance_groups (num_inputs); + const size_t num_vgs = variance_groups.size() ? variance_groups.maxCoeff()+1 : 1; + if (num_vgs > 1) + CONSOLE ("Number of variance groups: " + str(num_vgs)); + + // Load hypotheses + const vector hypotheses = Math::Stats::GLM::load_hypotheses (argument[2]); + const size_t num_hypotheses = hypotheses.size(); + if (hypotheses[0].cols() != num_factors) + throw Exception ("The number of columns in the contrast matrix (" + str(hypotheses[0].cols()) + ")" + + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + + (extra_columns.size() ? " (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); + CONSOLE ("Number of hypotheses: " + str(num_hypotheses)); + + const std::string output_prefix = argument[3]; + + // Load input data + matrix_type data (num_inputs, num_elements); + for (size_t subject = 0; subject != num_inputs; subject++) + (*importer[subject]) (data.row(subject)); + + const bool nans_in_data = !data.allFinite(); + if (nans_in_data) { + INFO ("Non-finite values present in data; rows will be removed from element-wise design matrices accordingly"); + if (!extra_columns.size()) { + INFO ("(Note that this will result in slower execution than if such values were not present)"); } } + // Only add contrast matrix row number to image outputs if there's more than one hypothesis + auto postfix = [&] (const size_t i) { return (num_hypotheses > 1) ? 
("_" + hypotheses[i].name()) : ""; }; + { - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation...", contrast.cols() + 3); + matrix_type betas (num_factors, num_elements); + matrix_type abs_effect_size (num_elements, num_hypotheses); + matrix_type std_effect_size (num_elements, num_hypotheses); + matrix_type stdev (num_vgs, num_elements); + vector_type cond (num_elements); - const matrix_type betas = Math::Stats::GLM::solve_betas (data, design); - for (size_t i = 0; i < size_t(contrast.cols()); ++i) { - save_vector (betas.col(i), output_prefix + "_beta_" + str(i) + ".csv"); - ++progress; - } + Math::Stats::GLM::all_stats (data, design, extra_columns, hypotheses, variance_groups, + cond, betas, abs_effect_size, std_effect_size, stdev); - const matrix_type abs_effects = Math::Stats::GLM::abs_effect_size (data, design, contrast); - save_vector (abs_effects.col(0), output_prefix + "_abs_effect.csv"); + ProgressBar progress ("Outputting beta coefficients, effect size and standard deviation", 2 + (2 * num_hypotheses) + (nans_in_data || extra_columns.size() ? 1 : 0)); + save_matrix (betas, output_prefix + "betas.csv"); ++progress; - - const matrix_type std_effects = Math::Stats::GLM::std_effect_size (data, design, contrast); - vector_type first_std_effect = std_effects.col(0); - for (size_t i = 0; i != num_elements; ++i) { - if (!std::isfinite (first_std_effect[i])) - first_std_effect[i] = 0.0; + for (size_t i = 0; i != num_hypotheses; ++i) { + if (!hypotheses[i].is_F()) { + save_vector (abs_effect_size.col(i), output_prefix + "abs_effect" + postfix(i) + ".csv"); + ++progress; + if (num_vgs == 1) + save_vector (std_effect_size.col(i), output_prefix + "std_effect" + postfix(i) + ".csv"); + } else { + ++progress; + } + ++progress; } - save_vector (first_std_effect, output_prefix + "_std_effect.csv"); - ++progress; - - const matrix_type stdevs = Math::Stats::GLM::stdev (data, design); - save_vector (stdevs.col(0), output_prefix + "_std_dev.csv"); + if (nans_in_data || extra_columns.size()) { + save_vector (cond, output_prefix + "cond.csv"); + ++progress; + } + if (num_vgs == 1) + save_vector (stdev.row(0), output_prefix + "std_dev.csv"); + else + save_matrix (stdev, output_prefix + "std_dev.csv"); } - Math::Stats::GLMTTest glm_ttest (data, design, contrast); + // Construct the class for performing the initial statistical tests + std::shared_ptr glm_test; + if (extra_columns.size() || nans_in_data) { + if (variance_groups.size()) + glm_test.reset (new GLM::TestVariableHeteroscedastic (extra_columns, data, design, hypotheses, variance_groups, nans_in_data, nans_in_columns)); + else + glm_test.reset (new GLM::TestVariableHomoscedastic (extra_columns, data, design, hypotheses, nans_in_data, nans_in_columns)); + } else { + if (variance_groups.size()) + glm_test.reset (new GLM::TestFixedHeteroscedastic (data, design, hypotheses, variance_groups)); + else + glm_test.reset (new GLM::TestFixedHomoscedastic (data, design, hypotheses)); + } // Precompute default statistic // Don't use convenience function: No enhancer! 
- // Manually construct default permutation - vector default_permutation (filenames.size()); - for (size_t i = 0; i != filenames.size(); ++i) - default_permutation[i] = i; - vector_type default_tvalues; - glm_ttest (default_permutation, default_tvalues); - save_vector (default_tvalues, output_prefix + "_tvalue.csv"); + // Manually construct default shuffling matrix + // TODO Change to use convenience function; we make an empty enhancer later anyway + const matrix_type default_shuffle (matrix_type::Identity (num_inputs, num_inputs)); + matrix_type default_statistic, default_zstat; + (*glm_test) (default_shuffle, default_statistic, default_zstat); + for (size_t i = 0; i != num_hypotheses; ++i) { + save_matrix (default_statistic.col(i), output_prefix + (hypotheses[i].is_F() ? "F" : "t") + "value" + postfix(i) + ".csv"); + save_matrix (default_zstat.col(i), output_prefix + "Zstat" + postfix(i) + ".csv"); + } // Perform permutation testing if (!get_options ("notest").size()) { + const bool fwe_strong = get_option_value ("strong", false); + if (fwe_strong && num_hypotheses == 1) { + WARN("Option -strong has no effect when testing a single hypothesis only"); + } + std::shared_ptr enhancer; - vector_type null_distribution (num_perms), uncorrected_pvalues (num_perms); - vector_type empirical_distribution; - - if (permutations.size()) { - Stats::PermTest::run_permutations (permutations, glm_ttest, enhancer, empirical_distribution, - default_tvalues, std::shared_ptr(), - null_distribution, std::shared_ptr(), - uncorrected_pvalues, std::shared_ptr()); + matrix_type null_distribution, uncorrected_pvalues; + count_matrix_type null_contributions; + matrix_type empirical_distribution; // unused + Stats::PermTest::run_permutations (glm_test, enhancer, empirical_distribution, default_zstat, fwe_strong, + null_distribution, null_contributions, uncorrected_pvalues); + if (fwe_strong) { + save_vector (null_distribution.col(0), output_prefix + "null_dist.csv"); } else { - Stats::PermTest::run_permutations (num_perms, glm_ttest, enhancer, empirical_distribution, - default_tvalues, std::shared_ptr(), - null_distribution, std::shared_ptr(), - uncorrected_pvalues, std::shared_ptr()); + for (size_t i = 0; i != num_hypotheses; ++i) + save_vector (null_distribution.col(i), output_prefix + "null_dist" + postfix(i) + ".csv"); + } + const matrix_type fwe_pvalues = MR::Math::Stats::fwe_pvalue (null_distribution, default_zstat); + for (size_t i = 0; i != num_hypotheses; ++i) { + save_vector (fwe_pvalues.col(i), output_prefix + "fwe_1mpvalue" + postfix(i) + ".csv"); + save_vector (uncorrected_pvalues.col(i), output_prefix + "uncorrected_pvalue" + postfix(i) + ".csv"); + save_vector (null_contributions.col(i), output_prefix + "null_contributions" + postfix(i) + ".csv"); } - - vector_type default_pvalues (num_elements); - Math::Stats::Permutation::statistic2pvalue (null_distribution, default_tvalues, default_pvalues); - save_vector (default_pvalues, output_prefix + "_fwe_pvalue.csv"); - save_vector (uncorrected_pvalues, output_prefix + "_uncorrected_pvalue.csv"); } - } diff --git a/cmd/voxel2fixel.cpp b/cmd/voxel2fixel.cpp index c20f455bca..dfbe7f9d4a 100644 --- a/cmd/voxel2fixel.cpp +++ b/cmd/voxel2fixel.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "progressbar.h" #include "algo/loop.h" @@ -21,10 +22,13 @@ #include "fixel/helpers.h" #include "fixel/keys.h" #include "fixel/loop.h" +#include "fixel/types.h" using namespace MR; using namespace App; +using Fixel::index_type; + void usage () { @@ -48,7 +52,7 @@ void run () auto scalar = Image::open (argument[0]); std::string input_fixel_directory = argument[1]; Fixel::check_fixel_directory (input_fixel_directory); - auto input_fixel_index = Fixel::find_index_header (input_fixel_directory).get_image(); + auto input_fixel_index = Fixel::find_index_header (input_fixel_directory).get_image(); check_dimensions (scalar, input_fixel_index, 0, 3); std::string output_fixel_directory = argument[2]; diff --git a/cmd/voxel2mesh.cpp b/cmd/voxel2mesh.cpp index 3c35687436..55de9e9574 100644 --- a/cmd/voxel2mesh.cpp +++ b/cmd/voxel2mesh.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" diff --git a/cmd/warp2metric.cpp b/cmd/warp2metric.cpp index a4b70133cb..915942b50d 100644 --- a/cmd/warp2metric.cpp +++ b/cmd/warp2metric.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "algo/threaded_loop.h" @@ -35,8 +36,8 @@ void usage () REFERENCES + "Raffelt, D.; Tournier, JD/; Smith, RE.; Vaughan, DN.; Jackson, G.; Ridgway, GR. Connelly, A." // Internal - "Investigating White Matter Fibre Density and Morphology using Fixel-Based Analysis. \n" - "Neuroimage, 2016, 10.1016/j.neuroimage.2016.09.029\n"; + "Investigating White Matter Fibre Density and Morphology using Fixel-Based Analysis. " + "Neuroimage, 2017, 144, 58-73, doi: 10.1016/j.neuroimage.2016.09.029"; ARGUMENTS + Argument ("in", "the input deformation field").type_image_in(); diff --git a/cmd/warpconvert.cpp b/cmd/warpconvert.cpp index aa77e01094..8de075e9c3 100644 --- a/cmd/warpconvert.cpp +++ b/cmd/warpconvert.cpp @@ -1,24 +1,26 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "image.h" #include "registration/warp/helpers.h" #include "registration/warp/compose.h" #include "registration/warp/convert.h" #include "adapter/extract.h" +#include "file/nifti_utils.h" using namespace MR; @@ -112,7 +114,11 @@ void run () // warpfull2deformation & warpfull2displacement } else if (type == 2 || type == 3) { - + if (!Path::is_mrtrix_image (argument[0]) && !(Path::has_suffix (argument[0], {".nii", ".nii.gz"}) && + File::Config::get_bool ("NIfTIAutoLoadJSON", false) && + Path::exists(File::NIfTI::get_json_path(opt[0][0])))) + WARN ("warp_full image is not in original .mif/.mih file format or in NIfTI file format with associated JSON. " + "Converting to other file formats may remove linear transformations stored in the image header."); auto warp = Image::open (argument[0]).with_direct_io (3); Registration::Warp::check_warp_full (warp); diff --git a/cmd/warpcorrect.cpp b/cmd/warpcorrect.cpp index 9295001b69..979aa98c37 100644 --- a/cmd/warpcorrect.cpp +++ b/cmd/warpcorrect.cpp @@ -1,18 +1,18 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - - #include "command.h" #include "image.h" #include "algo/threaded_loop.h" @@ -23,11 +23,13 @@ using namespace MR; using namespace App; +const float PRECISION = Eigen::NumTraits::dummy_precision(); + void usage () { - AUTHOR = "David Raffelt (david.raffelt@florey.edu.au)"; + AUTHOR = "David Raffelt (david.raffelt@florey.edu.au) & Max Pietsch (mail@maxpietsch.com)"; - SYNOPSIS = "Replaces voxels in a deformation field that point to 0,0,0 with nan,nan,nan"; + SYNOPSIS = "Replaces voxels in a deformation field that point to a specific out of bounds location with nan,nan,nan"; DESCRIPTION + "This can be used in conjunction with the warpinit command to compute a MRtrix " @@ -36,11 +38,49 @@ void usage () ARGUMENTS + Argument ("in", "the input warp image.").type_image_in () + Argument ("out", "the output warp image.").type_image_out (); + + OPTIONS + + Option ("marker", "single value or a comma separated list of values that define out of bounds voxels in the input warp image." + " Default: (0,0,0).") + + Argument ("coordinates").type_sequence_float() + + Option ("tolerance", "numerical precision used for L2 matrix norm comparison. 
Default: " + str(PRECISION) + ".") + + Argument ("value").type_float(PRECISION); } using value_type = float; +class BoundsCheck { MEMALIGN(BoundsCheck) + public: + BoundsCheck (value_type tolerance, const Eigen::Matrix& marker, size_t& total_count): + precision (tolerance), + vec (marker), + counter (total_count), + count (0) { } + template + void operator() (ImageTypeIn& in, ImageTypeOut& out) + { + val = Eigen::Matrix(in.row(3)); + if ((vec - val).isMuchSmallerThan(precision) || (vec.hasNaN() && val.hasNaN())) { + count++; + for (auto l = Loop (3) (out); l; ++l) + out.value() = NaN; + } else { + for (auto l = Loop (3) (in, out); l; ++l) + out.value() = in.value(); + } + } + virtual ~BoundsCheck () { + counter += count; + } + protected: + const value_type precision; + const Eigen::Matrix vec; + size_t& counter; + size_t count; + Eigen::Matrix val; +}; + void run () { @@ -49,16 +89,31 @@ void run () auto output = Image::create (argument[1], input); - auto func = [&](Image& in, Image& out) { - if (Eigen::Matrix(in.row(3)).norm() == 0.0) { - for (auto l = Loop (3) (out); l; ++l) - out.value() = NaN; - } else { - for (auto l = Loop (3) (in, out); l; ++l) - out.value() = in.value(); - } - }; + Eigen::Matrix oob_vector = Eigen::Matrix::Zero(); + auto opt = get_options ("marker"); + if (opt.size() == 1) { + const auto loc = parse_floats (opt[0][0]); + if (loc.size() == 1) { + oob_vector.fill(loc[0]); + } else if (loc.size() == 3) { + for (auto i=0; i<3; i++) + oob_vector[i] = loc[i]; + } else throw Exception("location option requires either single value or list of 3 values"); + } + + opt = get_options ("tolerance"); + value_type precision = PRECISION; + if (opt.size()) + precision = opt[0][0]; + + size_t count (0); + auto func = BoundsCheck (precision, oob_vector, count); ThreadedLoop ("correcting warp", input, 0, 3) .run (func, input, output); + + if (count == 0) + WARN("no out of bounds voxels found with value (" + + str(oob_vector[0]) + "," + str(oob_vector[1]) + "," + str(oob_vector[2]) + ")"); + INFO("converted " + str(count) + " out of bounds values"); } diff --git a/cmd/warpinit.cpp b/cmd/warpinit.cpp index 3e55ba7c05..f414d27181 100644 --- a/cmd/warpinit.cpp +++ b/cmd/warpinit.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "command.h" #include "image.h" #include "stride.h" @@ -42,7 +43,7 @@ void usage () "directly (which may be useful to create images suitable for use in the " "registration program) using the following syntax:" - + " $ warpinit template.mif warp-[].nii"; + + " $ warpinit template.mif warp-'[]'.nii"; ARGUMENTS + Argument ("template", "the input template image.").type_image_in () diff --git a/cmd/warpinvert.cpp b/cmd/warpinvert.cpp index 26a91b1e33..c944e828f2 100644 --- a/cmd/warpinvert.cpp +++ b/cmd/warpinvert.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "command.h" #include "datatype.h" #include "header.h" diff --git a/configure b/configure index 480a06fd3b..6ff2d708ad 100755 --- a/configure +++ b/configure @@ -1,5 +1,22 @@ #!/usr/bin/env python +# Copyright (c) 2008-2019 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +# pylint: disable=invalid-name + usage_string = ''' USAGE @@ -50,6 +67,9 @@ OPTIONS -openmp enable OpenMP compiler flags. + -conda prevent stripping anaconda/miniconda from the PATH (only use if + you intend building with the conda toolchain - not recommended) + ENVIRONMENT VARIABLES @@ -74,17 +94,17 @@ ENVIRONMENT VARIABLES The arguments expected by the compiler. The default is: "-c CFLAGS SRC -o OBJECT" - LD + LINK The linker command to use. The default is the same as CXX. - LD_ARGS + LINK_ARGS The arguments expected by the linker. The default is: - "LDFLAGS OBJECTS -o EXECUTABLE" + "LINKFLAGS OBJECTS -o EXECUTABLE" - LDLIB_ARGS + LINKLIB_ARGS The arguments expected by the linker for generating a shared library. The default is: - "-shared LDLIB_FLAGS OBJECTS -o LIB" + "-shared LINKLIB_FLAGS OBJECTS -o LIB" ARCH the specific CPU architecture to compile for. This variable will be @@ -96,10 +116,10 @@ ENVIRONMENT VARIABLES CFLAGS Any additional flags to the compiler. - LDFLAGS + LINKFLAGS Any additional flags to the linker. 
- LDLIB_FLAGS + LINKLIB_FLAGS Any additional flags to the linker to generate a shared library. EIGEN_CFLAGS @@ -111,19 +131,19 @@ ENVIRONMENT VARIABLES ZLIB_CFLAGS Any flags required to compile with the zlib compression library. - ZLIB_LDFLAGS + ZLIB_LINKFLAGS Any flags required to link with the zlib compression library. TIFF_CFLAGS Any flags required to compile with the TIFF library. - TIFF_LDFLAGS + TIFF_LINKFLAGS Any flags required to link with the TIFF library. FFTW_CFLAGS Any flags required to compile with the FFTW library. - FFTW_LDFLAGS + FFTW_LINKFLAGS Any flags required to link with the FFTW library. QMAKE @@ -145,14 +165,13 @@ ENVIRONMENT VARIABLES specified correctly at configure time. ''' -import subprocess, sys, os, platform, tempfile, shutil, shlex, re, copy +import subprocess, sys, os, platform, tempfile, shlex, re, copy system = platform.system().lower() # on Windows, need to use MSYS2 version of python - not MinGW version: if sys.executable[0].isalpha() and sys.executable[1] == ':': - python_cmd = subprocess.check_output ([ 'cygpath.exe', '-w', '/usr/bin/python' ]).splitlines()[0].strip() - sys.exit (subprocess.call ([ python_cmd.decode(errors='ignore') ] + sys.argv)) - + python_cmd = subprocess.check_output ([ 'cygpath.exe', '-w', '/usr/bin/python' ]).decode(errors='ignore').splitlines()[0].strip() + sys.exit (subprocess.call ([ python_cmd ] + sys.argv)) debug = False @@ -165,6 +184,7 @@ verbose = False R_module = False openmp = False dev = False +conda = False optimlevel = 3 @@ -172,30 +192,38 @@ for arg in sys.argv[1:]: if '-debug'.startswith (arg): debug = True optimlevel = 0 - elif '-dev'.startswith (arg): dev = True - elif '-assert'.startswith (arg): asserts = True - elif '-nooptim'.startswith (arg): optimlevel = 0 + elif '-dev'.startswith (arg): + dev = True + elif '-assert'.startswith (arg): + asserts = True + elif '-nooptim'.startswith (arg): + optimlevel = 0 elif '-profile'.startswith (arg): profile = True optimlevel = 0 - elif '-nogui'.startswith (arg): nogui = True - elif '-noshared'.startswith (arg): noshared = True + elif '-nogui'.startswith (arg): + nogui = True + elif '-noshared'.startswith (arg): + noshared = True elif '-static'.startswith (arg): static = True noshared = True - elif '-verbose'.startswith (arg): verbose = True + elif '-verbose'.startswith (arg): + verbose = True elif '-R'.startswith (arg): R_module = True #noshared = True nogui = True - elif '-openmp'.startswith (arg): openmp = True + elif '-openmp'.startswith (arg): + openmp = True + elif '-conda'.startswith (arg): + conda = True else: - print (usage_string) + sys.stdout.write (usage_string) sys.exit (1) -global logfile, config_report logfile = open (os.path.join (os.path.dirname(sys.argv[0]), 'configure.log'), 'wb') config_report = '' @@ -203,7 +231,7 @@ config_report = '' def log (message): global logfile logfile.write (message.encode (errors='ignore')) - if (verbose): + if verbose: sys.stdout.write (message) sys.stdout.flush() @@ -217,66 +245,78 @@ def report (message): def error (message): global logfile logfile.write (('\nERROR: ' + message.rstrip() + '\n\n').encode (errors='ignore')) - sys.stderr.write ('\nERROR: ' + message.rstrip() + '\n\n') + sys.stdout.write ('\nERROR: ' + message.rstrip() + '\n\n') + sys.stdout.flush() sys.exit (1) -if profile: build_type = 'profiling version' -elif debug: build_type = 'debug version' -else: build_type = 'release version' +if profile: + build_type = 'profiling version' +elif debug: + build_type = 'debug version' +else: + build_type = 'release 
version' build_options = [] -if asserts: build_options.append ('asserts') -if optimlevel <= 1: build_options.append ('nooptim') -if nogui: build_options.append ('nogui') -if noshared: build_options.append ('noshared') -if static: build_options.append ('static') -if openmp: build_options.append ('openmp') - -if len(build_options): +if asserts: + build_options.append ('asserts') +if optimlevel <= 1: + build_options.append ('nooptim') +if nogui: + build_options.append ('nogui') +if noshared: + build_options.append ('noshared') +if static: + build_options.append ('static') +if openmp: + build_options.append ('openmp') + +if build_options: build_type += ' with ' + ', '.join (build_options) report (""" MRtrix build type requested: """ + build_type + '\n\n') -# remove any mention of anaconda from PATH: -path = os.environ['PATH'] -if path.endswith ('\\'): - path = path[:-1] -cleanpath = []; -oldpath = path.split(':') -for entry in oldpath: - if 'anaconda' not in entry.lower(): - cleanpath += [ entry ] -if not oldpath == cleanpath: - report ('WARNING: Anaconda removed from PATH to avoid conflicts\n\n') -path = os.pathsep.join(cleanpath) +# if not using conda, remove any mention of conda from PATH: +issue_conda_warning = False +if conda: + path = os.environ['PATH'] +else: + path = [] + for entry in os.environ['PATH'].split(os.pathsep): + if 'conda' in entry: + report ('WARNING: anaconda/miniconda detected in PATH ("' + entry + '") - removed to avoid conflicts\n') + issue_conda_warning = True + else: + path += [ entry ] + path = os.pathsep.join(path) + os.environ['PATH'] = path -os.environ['PATH'] = path +log ('PATH set to: ' + path) -global cpp, cpp_cmd, ld, ld_args, ld_cmd +cpp = ld = None cxx = [ 'clang++', 'g++' ] cxx_args = '-c CFLAGS SRC -o OBJECT'.split() cpp_flags = [ '-std=c++11', '-DMRTRIX_BUILD_TYPE="'+build_type+'"' ] -ld_args = 'OBJECTS LDFLAGS -o EXECUTABLE'.split() +ld_args = 'OBJECTS LINKFLAGS -o EXECUTABLE'.split() ld_flags = [] if system != 'darwin': ld_flags += [ '-Wl,--sort-common,--as-needed' ] if static: - ld_flags += [ '-static', '-Wl,-u,pthread_cancel,-u,pthread_cond_broadcast,-u,pthread_cond_destroy,-u,pthread_cond_signal,-u,pthread_cond_wait,-u,pthread_create,-u,pthread_detach,-u,pthread_cond_signal,-u,pthread_equal,-u,pthread_join,-u,pthread_mutex_lock,-u,pthread_mutex_unlock,-u,pthread_once,-u,pthread_setcancelstate' ] + ld_flags += [ '-static', '-Wl,--whole-archive', '-lpthread', '-Wl,--no-whole-archive'] -ld_lib_args = 'OBJECTS LDLIB_FLAGS -o LIB'.split() +ld_lib_args = 'OBJECTS LINKLIB_FLAGS -o LIB'.split() -class TempFile: +class TempFile(object): def __init__ (self, suffix): self.fid = None self.name = None @@ -286,17 +326,15 @@ class TempFile: def __enter__ (self): return self - def __exit__(self, type, value, traceback): + def __exit__(self, exception_type, value, traceback): try: os.unlink (self.name) - except OSError as error: - log ('error deleting temporary file "' + self.name + '": ' + error.strerror) - except: - raise + except OSError as excp: + log ('error deleting temporary file "' + self.name + '": ' + excp.strerror) -class DeleteAfter: +class DeleteAfter(object): def __init__ (self, name): self.name = name @@ -306,44 +344,45 @@ class DeleteAfter: def __exit__(self, exception_type, value, traceback): try: os.unlink (self.name) - except OSError as error: - log ('error deleting temporary file "' + self.name + '": ' + error.strerror) - except: - raise + except OSError as excp: + log ('error deleting temporary file "' + self.name + '": ' + excp.strerror) -class 
TempDir: +class TempDir(object): def __init__ (self): - self.name = tempfile.mkdtemp (); + self.name = tempfile.mkdtemp() def __enter__ (self): return self - def __exit__(self, type, value, traceback): + def __exit__(self, exception_type, value, traceback): try: - for entry in os.listdir (self.name): - fname = os.path.join (self.name, entry) - if os.path.isdir (fname): - os.rmdir (fname) + for basename in os.listdir (self.name): + fullpath = os.path.join (self.name, basename) + if os.path.isdir (fullpath): + os.rmdir (fullpath) else: - os.unlink (fname) + os.unlink (fullpath) os.rmdir (self.name) - except OSError as error: - log ('error deleting temporary folder "' + self.name + '": ' + error.strerror) - except: - raise + except OSError as excp: + log ('error deleting temporary folder "' + self.name + '": ' + excp.strerror) # error handling helpers: - -class VersionError (Exception): pass -class QMakeError (Exception): pass -class QMOCError (Exception): pass -class CompileError (Exception): pass -class LinkError (Exception): pass -class RuntimeError (Exception): pass +class VersionError (Exception): + pass +class QMakeError (Exception): + pass +class QMOCError (Exception): + pass +class CompileError (Exception): + pass +class LinkError (Exception): + pass +class RunError (Exception): + pass def compiler_hint (cmd, flags_var, flags, args_var=None, args=None): ret=''' @@ -425,22 +464,27 @@ def qt_exec_hint (name): def commit (name, variable): cache.write (name + ' = ') - if type (variable) == type([]): + if isinstance (variable, list): cache.write ('[') - if len(variable): cache.write(' \'' + '\', \''.join (variable) + '\' ') + if variable: + cache.write(' \'' + '\', \''.join (variable) + '\' ') cache.write (']\n') - else: cache.write ('\'' + variable + '\'\n') + else: + cache.write ('\'' + variable + '\'\n') -def fillin (template, keyvalue): - cmd = [] +def fillin (template, keyvalues): + command_string = [] for item in template: - if item in keyvalue: - if type(keyvalue[item]) == type ([]): cmd += keyvalue[item] - else: cmd += [ keyvalue[item] ] - else: cmd += [ item ] - return cmd + if item in keyvalues: + if isinstance(keyvalues[item], list): + command_string += keyvalues[item] + else: + command_string += [ keyvalues[item] ] + else: + command_string += [ item ] + return command_string @@ -452,17 +496,18 @@ def execute (cmd, exception, raise_on_non_zero_exit_code = True, cwd = None): log ('EXIT: ' + str(process.returncode) + '\n') stdout = stdout.decode(errors='ignore').rstrip() - if len (stdout): log ('STDOUT:\n' + stdout + '\n') + if stdout: + log ('STDOUT:\n' + stdout + '\n') stderr = stderr.decode(errors='ignore').rstrip() - if len (stderr): log ('STDERR:\n' + stderr + '\n') + if stderr: + log ('STDERR:\n' + stderr + '\n') log ('>>\n\n') - except OSError as error: - log ('error invoking command "' + cmd[0] + '": ' + error.strerror + '\n>>\n\n') + except OSError as e: + log ('error invoking command "' + cmd[0] + '": ' + e.strerror + '\n>>\n\n') raise exception - except: - print ('Unexpected error:', str(sys.exc_info())) - raise + except Exception as excp: + error ('unexpected exception of type ' + type(excp).__name__ + ': ' + str(excp) + configure_log_hint) else: if raise_on_non_zero_exit_code and process.returncode != 0: raise exception (stderr) @@ -472,39 +517,35 @@ def execute (cmd, exception, raise_on_non_zero_exit_code = True, cwd = None): -def compile (source, compiler_flags = [], linker_flags = []): +def compile (source, compiler_flags, linker_flags): # pylint: 
disable=redefined-builtin global cpp, ld - with TempFile ('.cpp') as F: - log ('\nCOMPILE ' + F.name + ':\n---\n' + source + '\n---\n') - F.fid.write (source) - F.fid.flush() - F.fid.close() - with DeleteAfter (F.name[:-4] + '.o') as obj: - cmd = fillin (cpp, { - 'CFLAGS': compiler_flags, - 'SRC': F.name, - 'OBJECT': obj.name }) - execute (cmd, CompileError) + with TempFile ('.cpp') as srcfile: + log ('\nCOMPILE ' + srcfile.name + ':\n---\n' + source + '\n---\n') + srcfile.fid.write (source) + srcfile.fid.flush() + srcfile.fid.close() + with DeleteAfter (srcfile.name[:-4] + '.o') as objfile: + execute (fillin (cpp, { + 'CFLAGS': compiler_flags, + 'SRC': srcfile.name, + 'OBJECT': objfile.name }), CompileError) with DeleteAfter ('a.out') as out: - cmd = fillin (ld, { - 'LDFLAGS': linker_flags, - 'OBJECTS': obj.name, - 'EXECUTABLE': out.name }) - execute (cmd, LinkError) + execute (fillin (ld, { + 'LINKFLAGS': linker_flags, + 'OBJECTS': objfile.name, + 'EXECUTABLE': out.name }), LinkError) - ret = execute ([ './'+out.name ], RuntimeError) + return execute ([ './'+out.name ], RunError)[1] - return ret[1] - -def compare_version (needed, observed): - needed = [ float(n) for n in needed.split()[0].split('.') ] - observed = [ float(n) for n in observed.split()[0].split('.') ] - for n in zip (needed, observed): - if n[0] > n[1]: - return False - return True +#def compare_version (needed, observed): +# needed = [ float(n) for n in needed.split()[0].split('.') ] +# observed = [ float(n) for n in observed.split()[0].split('.') ] +# for n in zip (needed, observed): +# if n[0] > n[1]: +# return False +# return True @@ -523,13 +564,13 @@ def get_flags (default=None, env=None, pkg_config_flags=None): if pkg_config_flags: try: flags = [] - for entry in shlex.split (execute ([ 'pkg-config' ] + pkg_config_flags.split(), RuntimeError)[1]): - if entry.startswith ('-I'): - flags += [ '-idirafter', entry[2:] ] + for flag in shlex.split (execute ([ 'pkg-config' ] + pkg_config_flags.split(), RunError)[1]): + if flag.startswith ('-I'): + flags += [ '-idirafter', flag[2:] ] else: - flags += [ entry ] + flags += [ flag ] return flags - except: + except Exception: log('error running "pkg-config ' + pkg_config_flags + '"\n\n') return default @@ -548,12 +589,12 @@ def compile_test (name, cflags, ldflags, code, on_success='ok', on_failure='not report ('Checking for ' + name + ': ') try: stdout = compile (code, cflags, ldflags) - if len(stdout): + if stdout: report (stdout.splitlines()[0] + '\n') else: report (on_success+'\n') return True - except: + except Exception: report (on_failure+'\n') return False @@ -584,7 +625,7 @@ def compile_check (full_name, name, cflags, ldflags, code, cflags_env=None, cfla report ('Checking for ' + full_name + ': ') try: stdout = compile (code, cflags, ldflags) - if len(stdout): + if stdout: report (stdout.splitlines()[0] + '\n') else: report (on_success+'\n') @@ -596,7 +637,7 @@ def compile_check (full_name, name, cflags, ldflags, code, cflags_env=None, cfla error ('error compiling ' + name + ''' application! MRtrix3 was unable to compile a test program involving ''' + name + '.' + hint) - except LinkError as e: + except LinkError: if cflags_env and cflags_hint: hint = linker_flags_hint (name, ldflags_env, ldflags_hint) else: @@ -604,12 +645,12 @@ def compile_check (full_name, name, cflags, ldflags, code, cflags_env=None, cfla error ('error linking ' + name + ''' application! MRtrix3 was unable to link a test program involving ''' + name + '.' 
+ hint) - except RuntimeError: + except RunError: error ('''runtime error! Unable to configure ''' + name + configure_log_hint) - except: - error ('unexpected exception!' + configure_log_hint) + except Exception as excp: + error ('unexpected exception of type ' + type(excp).__name__ + ': ' + str(excp) + configure_log_hint) @@ -628,7 +669,6 @@ ld_lib_flags = [] if system.startswith('mingw') or system.startswith('msys'): system = 'windows' -report ('Detecting OS: ' + system + '\n') if system == 'linux': cpp_flags += [ '-pthread', '-fPIC' ] lib_suffix = '.so' @@ -656,7 +696,7 @@ elif system == 'darwin': elif 'MACOSX_DEPLOYMENT_TARGET' in os.environ: macosx_version = os.environ['MACOSX_DEPLOYMENT_TARGET'] else: - macosx_version = ('.'.join(execute([ 'sw_vers', '-productVersion' ], RuntimeError)[1].split('.')[:2])) + macosx_version = ('.'.join(execute([ 'sw_vers', '-productVersion' ], RunError)[1].split('.')[:2])) report ('OS X deployment target: ' + macosx_version + '\n') cpp_flags += [ '-DMRTRIX_MACOSX', '-fPIC', '-mmacosx-version-min='+macosx_version ] ld_flags += [ '-mmacosx-version-min='+macosx_version ] @@ -667,32 +707,60 @@ elif system == 'darwin': -if 'ARCH' in os.environ.keys(): - march = os.environ['ARCH'] - if march: - report ('Machine architecture set by ARCH environment variable to: ' + march + '\n') - cpp_flags += [ '-march='+march ] +# set CPP compiler: +ld_cmdline = None +if 'CXX' in os.environ.keys(): + cxx_env = os.environ['CXX'] + if not conda and 'conda' in cxx_env: + report ('WARNING: anaconda/miniconda compiler set by CXX environment variable - ignored to avoid conflicts\n') + issue_conda_warning = True + else: + cxx = shlex.split (cxx_env) +if 'CXX_ARGS' in os.environ.keys(): + cxx_args = shlex.split (os.environ['CXX_ARGS']) +if 'LINK' in os.environ.keys(): + ld_env = os.environ['LINK'] + if not conda and 'conda' in ld_env: + report ('WARNING: anaconda/miniconda linker set by LINK environment variable - ignored to avoid conflicts\n') + issue_conda_warning = True + else: + ld_cmdline = shlex.split (ld_env) +if 'LINK_ARGS' in os.environ.keys(): + ld_args = shlex.split (os.environ['LINK_ARGS']) +if 'LINKLIB_ARGS' in os.environ.keys(): + ld_lib_args = shlex.split (os.environ['LINKLIB_ARGS']) -# set CPP compiler: -ld_cmdline = None -if 'CXX' in os.environ.keys(): cxx = shlex.split (os.environ['CXX']) -if 'CXX_ARGS' in os.environ.keys(): cxx_args = shlex.split (os.environ['CXX_ARGS']) -if 'LD' in os.environ.keys(): ld_cmdline = shlex.split (os.environ['LD']) -if 'LD_ARGS' in os.environ.keys(): ld_args = shlex.split (os.environ['LD_ARGS']) -if 'LDLIB_ARGS' in os.environ.keys(): ld_lib_args = shlex.split (os.environ['LDLIB_ARGS']) + +if issue_conda_warning: + report ('\nNOTE: if you intend to build with anaconda/miniconda (not recommended), pass the -conda flag to ./configure\n\n') + + +report ('Detecting OS: ' + system + '\n') + +if 'ARCH' in os.environ.keys(): + march = os.environ['ARCH'] + if march: + report ('Machine architecture set by ARCH environment variable to: ' + march + '\n') + cpp_flags += [ '-march='+march ] + + + # CPP flags: -if 'CFLAGS' in os.environ.keys(): cpp_flags += shlex.split (os.environ['CFLAGS']) -if 'LDFLAGS' in os.environ.keys(): ld_flags += shlex.split (os.environ['LDFLAGS']) +if 'CFLAGS' in os.environ.keys(): + cpp_flags += shlex.split (os.environ['CFLAGS']) +if 'LINKFLAGS' in os.environ.keys(): + ld_flags += shlex.split (os.environ['LINKFLAGS']) ld_lib_flags += ld_flags -if 'LDLIB_FLAGS' in os.environ.keys(): ld_lib_flags += shlex.split 
(os.environ['LDLIB_FLAGS']) +if 'LINKLIB_FLAGS' in os.environ.keys(): + ld_lib_flags += shlex.split (os.environ['LINKLIB_FLAGS']) for candidate in cxx: report ('Looking for compiler [' + candidate + ']: ') @@ -706,9 +774,11 @@ for candidate in cxx: try: compiler_version = execute ([ cpp[0], '--version' ], CompileError)[1] - if len(compiler_version) == 0: report ('(no version information)\n') - else: report (compiler_version.splitlines()[0] + '\n') - except: + if not compiler_version: + report ('(no version information)\n') + else: + report (compiler_version.splitlines()[0] + '\n') + except Exception: report ('not found\n') continue @@ -739,32 +809,32 @@ else: if not noshared: report ('Checking shared library generation: ') - with TempFile ('.cpp') as F: - F.fid.write ('int bogus() { return (1); }') - F.fid.flush() - F.fid.close() - with DeleteAfter (F.name[:-4] + '.o') as obj: - cmd = fillin (cpp, { - 'CFLAGS': cpp_flags, - 'SRC': F.name, - 'OBJECT': obj.name }) - try: execute (cmd, CompileError) + with TempFile ('.cpp') as bogus_cpp: + bogus_cpp.fid.write ('int bogus() { return (1); }') + bogus_cpp.fid.flush() + bogus_cpp.fid.close() + with DeleteAfter (bogus_cpp.name[:-4] + '.o') as bogus_obj: + try: + execute (fillin (cpp, { + 'CFLAGS': cpp_flags, + 'SRC': bogus_cpp.name, + 'OBJECT': bogus_obj.name }), CompileError) except CompileError: error ('compiler not found!' + configure_log_hint) - except: - error ('unexpected exception!' + configure_log_hint) + except Exception as excp: + error ('unexpected exception of type ' + type(excp).__name__ + ': ' + str(excp) + configure_log_hint) with DeleteAfter (lib_prefix + 'test' + lib_suffix) as lib: - cmd = fillin (ld_lib, { - 'LDLIB_FLAGS': ld_lib_flags, - 'OBJECTS': obj.name, - 'LIB': lib.name }) - try: execute (cmd, LinkError) + try: + execute (fillin (ld_lib, { + 'LINKLIB_FLAGS': ld_lib_flags, + 'OBJECTS': bogus_obj.name, + 'LIB': lib.name }), LinkError) except LinkError: error ('''linker not found! - MRtrix3 was unable to employ the linker program for shared library generation.''' + compiler_hint ('shared library linker', 'LDLIB_FLAGS', '"-L/usr/local/lib"', 'LDLIB_ARGS', '"-shared LDLIB_FLAGS OBJECTS -o LIB"')) - except: - error ('unexpected exception!' + configure_log_hint) + MRtrix3 was unable to employ the linker program for shared library generation.''' + compiler_hint ('shared library linker', 'LINKLIB_FLAGS', '"-L/usr/local/lib"', 'LINKLIB_ARGS', '"-shared LINKLIB_FLAGS OBJECTS -o LIB"')) + except Exception as excp: + error ('unexpected exception of type ' + type(excp).__name__ + ': ' + str(excp) + configure_log_hint) report ('ok\n') @@ -787,11 +857,12 @@ int main() { } ''', cpp_flags, ld_flags)) report (str(8*pointer_size) + ' bit\n') - if pointer_size == 8: cpp_flags += [ '-DMRTRIX_WORD64' ] + if pointer_size == 8: + cpp_flags += [ '-DMRTRIX_WORD64' ] elif pointer_size != 4: error ('unexpected pointer size!') -except: - error ('unable to determine pointer size!' 
+ configure_log_hint) +except Exception as excp: + error ('unexpected exception of type ' + type(excp).__name__ + ': ' + str(excp) + configure_log_hint) @@ -876,8 +947,7 @@ int main() { eigen_cflags = get_flags ([ '-isystem', '/usr/include/eigen3' ], 'EIGEN_CFLAGS', '--cflags eigen3') -compile_check ('Eigen3 library', 'Eigen3', cpp_flags + eigen_cflags, ld_flags, - ''' +compile_check ('Eigen3 library', 'Eigen3', cpp_flags + eigen_cflags, ld_flags, ''' #include #include #include @@ -888,10 +958,32 @@ int main (int argc, char* argv[]) { } ''', 'EIGEN_CFLAGS', '"-isystem /usr/include/eigen3"') + if not openmp: eigen_cflags += [ '-DEIGEN_DONT_PARALLELIZE' ] +if compile_test ('Eigen3 Unsupported', cpp_flags + eigen_cflags, ld_flags, ''' +#include +#include +#include + +using array_type = Eigen::Array; + +int main (int argc, char* argv[]) { + auto test = Eigen::betainc (array_type::Constant (10.0), array_type::Constant (0.5), array_type::Constant (1.0)); + std::cout << "Present"; + return (0); +} +''', on_failure='not found; custom functions to be used'): + cpp_flags += [ '-DMRTRIX_HAVE_EIGEN_UNSUPPORTED_SPECIAL_FUNCTIONS' ] + + + + + + + @@ -900,7 +992,7 @@ if not openmp: # zlib: zlib_cflags = get_flags ([], 'ZLIB_CFLAGS', '--cflags zlib') -zlib_ldflags = get_flags ([ '-lz' ], 'ZLIB_LDFLAGS', '--libs zlib') +zlib_ldflags = get_flags ([ '-lz' ], 'ZLIB_LINKFLAGS', '--libs zlib') compile_check ('zlib compression library', 'zlib', cpp_flags + zlib_cflags, ld_flags + zlib_ldflags, ''' #include @@ -910,7 +1002,7 @@ int main() { std::cout << zlibVersion(); return (0); } -''', 'ZLIB_CFLAGS', '"-isystem /usr/local/include"', 'ZLIB_LDFLAGS', '"-L/usr/local/lib -lz"') +''', 'ZLIB_CFLAGS', '"-isystem /usr/local/include"', 'ZLIB_LINKFLAGS', '"-L/usr/local/lib -lz"') cpp_flags += zlib_cflags ld_flags += zlib_ldflags @@ -923,8 +1015,8 @@ ld_lib_flags += zlib_ldflags # Test that JSON for Modern C++ will compile, since it enforces its own requirements -compile_check ('"JSON for Modern C++" requirements', 'JSON for modern C++', -cpp_flags + [ '-I'+os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'core')) ], ld_flags, ''' +compile_check ('"JSON for Modern C++" requirements', 'JSON for modern C++', \ + cpp_flags + [ '-I'+os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'core')) ], ld_flags, ''' #include "''' + os.path.join('file', 'json.h') + '''" int main (int argc, char* argv[]) { @@ -943,7 +1035,7 @@ int main (int argc, char* argv[]) # TIFF: tiff_cflags = get_flags ([], 'TIFF_CFLAGS', '--cflags libtiff-4') -tiff_ldflags = get_flags ([ '-ltiff' ], 'TIFF_LDFLAGS', '--libs libtiff-4') +tiff_ldflags = get_flags ([ '-ltiff' ], 'TIFF_LINKFLAGS', '--libs libtiff-4') if compile_test ('TIFF library', cpp_flags + tiff_cflags, ld_flags + tiff_ldflags, ''' #include @@ -961,11 +1053,35 @@ int main() { + +# PNG: + +png_cflags = get_flags ([], 'PNG_CFLAGS', '--cflags libpng') +png_ldflags = get_flags ([ '-lpng' ], 'PNG_LINKFLAGS', '--libs libpng') + +if compile_test ('PNG library', cpp_flags + png_cflags, ld_flags + png_ldflags, ''' +#include +#include + +int main() { + std::cout << "Header: " << PNG_LIBPNG_VER_STRING << "; library: " << png_libpng_ver; + return (0); +} +''', on_failure='not found - PNG support disabled'): + cpp_flags += [ '-DMRTRIX_PNG_SUPPORT' ] + png_cflags + ld_flags += png_ldflags + ld_lib_flags += png_ldflags + + + + + + # FFTW: fftw_cflags = get_flags ([], 'FFTW_CFLAGS', '--cflags fftw3') -fftw_ldflags = get_flags ([ '-lfftw3' ], 'FFTW_LDFLAGS', '--libs fftw3') +fftw_ldflags = 
get_flags ([ '-lfftw3' ], 'FFTW_LINKFLAGS', '--libs fftw3') if compile_test ('FFTW library', cpp_flags + fftw_cflags, ld_flags + fftw_ldflags, ''' #include @@ -1009,8 +1125,9 @@ def get_qt_version(cmd_list, raise_on_non_zero_exit_code): out = execute (cmd_list, raise_on_non_zero_exit_code, False) stdouterr = ' '.join(out[1:]).replace(r'\n',' ').replace(r'\r','') version_found = version_regex.search(stdouterr) - if version_found: return version_found.group() - else: raise raise_on_non_zero_exit_code('Version not Found') + if version_found: + return version_found.group() + raise raise_on_non_zero_exit_code('Version not Found') moc = '' @@ -1040,8 +1157,8 @@ if not nogui: error (''' Qt moc not found! MRtrix3 was unable to locate the Qt meta-object compiler 'moc'.''' + qt_path_hint) - except: - error ('unexpected exception!' + configure_log_hint) + except Exception as excp: + error ('unexpected exception of type ' + type(excp).__name__ + ': ' + str(excp) + configure_log_hint) report ('Checking for Qt qmake: ') qmake = 'qmake' @@ -1060,8 +1177,8 @@ if not nogui: error (''' Qt qmake not found! MRtrix3 was unable to locate the Qt command 'qmake'.''' + qt_path_hint) - except: - error ('unexpected exception!' + configure_log_hint) + except Exception as excp: + error ('unexpected exception of type ' + type(excp).__name__ + ': ' + str(excp) + configure_log_hint) @@ -1083,8 +1200,8 @@ if not nogui: error (''' Qt rcc not found! MRtrix3 was unable to locate the Qt command 'rcc'.''' + qt_path_hint) - except: - error ('unexpected exception!' + configure_log_hint) + except Exception as excp: + error ('unexpected exception of type ' + type(excp).__name__ + ': ' + str(excp) + configure_log_hint) @@ -1093,7 +1210,7 @@ if not nogui: try: with TempDir() as qt_dir: - file = '''#include + filetext = '''#include class Foo: public QObject { Q_OBJECT; @@ -1108,13 +1225,13 @@ class Foo: public QObject { int value_; }; ''' - log ('\nsource file "qt.h":\n---\n' + file + '---\n') + log ('\nsource file "qt.h":\n---\n' + filetext + '---\n') - f=open (os.path.join (qt_dir.name, 'qt.h'), 'w') - f.write (file) - f.close(); + f = open (os.path.join (qt_dir.name, 'qt.h'), 'w') + f.write (filetext) + f.close() - file = '''#include + filetext = '''#include #include "qt.h" Foo::Foo() : value_ (42) { connect (this, SIGNAL(valueChanged(int)), this, SLOT(setValue(int))); } @@ -1126,37 +1243,36 @@ void Foo::setValue (int value) { value_ = value; } int main() { Foo f; } ''' - log ('\nsource file "qt.cpp":\n---\n' + file + '---\n') - f=open (os.path.join (qt_dir.name, 'qt.cpp'), 'w') - f.write (file) - f.close(); + log ('\nsource file "qt.cpp":\n---\n' + filetext + '---\n') + f = open (os.path.join (qt_dir.name, 'qt.cpp'), 'w') + f.write (filetext) + f.close() - file = 'CONFIG += c++11' - if debug: file += ' debug' - file += '\nQT += core gui opengl svg\n' - file += 'HEADERS += qt.h\nSOURCES += qt.cpp\n' + filetext = 'CONFIG += c++11' + if debug: + filetext += ' debug' + filetext += '\nQT += core gui opengl svg network\n' + filetext += 'HEADERS += qt.h\nSOURCES += qt.cpp\n' if system == "darwin": - file += 'QMAKE_MACOSX_DEPLOYMENT_TARGET = '+macosx_version + '\n' + filetext += 'QMAKE_MACOSX_DEPLOYMENT_TARGET = ' + macosx_version + '\n' - log ('\nproject file "qt.pro":\n---\n' + file + '---\n') - f=open (os.path.join (qt_dir.name, 'qt.pro'), 'w') - f.write (file) - f.close(); + log ('\nproject file "qt.pro":\n---\n' + filetext + '---\n') + f = open (os.path.join (qt_dir.name, 'qt.pro'), 'w') + f.write (filetext) + f.close() qmake_cmd = 
[ qmake ] try: - (retcode, stdout, stderr) = execute (qmake_cmd, QMakeError, raise_on_non_zero_exit_code = False, cwd=qt_dir.name) - if retcode != 0: - error ('''qmake returned with error: + (qmake_retcode, qmake_stderr) = execute (qmake_cmd, QMakeError, raise_on_non_zero_exit_code = False, cwd=qt_dir.name)[0:3:2] + if qmake_retcode != 0: + error ('''qmake returned with error: -''' + stderr) - except QMakeError as E: +''' + qmake_stderr) + except QMakeError: error ('''error issuing qmake command! Use the QMAKE environment variable to set the correct qmake command for use with Qt''') - except: - raise qt_defines = [] @@ -1198,7 +1314,7 @@ int main() { Foo f; } if entry[2:].startswith('..'): qt_includes[index] = '-I' + os.path.abspath(qt_dir.name + '/' + entry[2:]) - qt_cflags = [e for e in qt_cflags if e != '-O2'] + qt_cflags = [ entry for entry in qt_cflags if entry != '-O2' ] qt = qt_cflags + qt_defines + qt_includes qt_cflags = [] for entry in qt: @@ -1212,23 +1328,22 @@ int main() { Foo f; } qt = qt_ldflags + qt_libs qt_ldflags = [] for entry in qt: - if entry[0] != '$': qt_ldflags += [ entry.replace('\"','').replace("'",'') ] + if entry[0] != '$': + qt_ldflags += [ entry.replace('\"','').replace("'",'') ] - cmd = [ moc, 'qt.h', '-o', 'qt_moc.cpp' ] - execute (cmd, QMOCError, cwd=qt_dir.name) #process = subprocess.Popen (cmd, cwd=qt_dir.name, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + execute ([ moc, 'qt.h', '-o', 'qt_moc.cpp' ], \ + QMOCError, cwd=qt_dir.name) - cmd = [ cpp[0], '-c' ] + cpp_flags + qt_cflags + [ 'qt.cpp', '-o', 'qt.o' ] - execute (cmd, CompileError, cwd=qt_dir.name) + execute ([ cpp[0], '-c' ] + cpp_flags + qt_cflags + [ 'qt.cpp', '-o', 'qt.o' ], \ + CompileError, cwd=qt_dir.name) - cmd = [ cpp[0], '-c' ] + cpp_flags + qt_cflags + [ 'qt_moc.cpp', '-o', 'qt_moc.o' ] - execute (cmd, CompileError, cwd=qt_dir.name) + execute ([ cpp[0], '-c' ] + cpp_flags + qt_cflags + [ 'qt_moc.cpp', '-o', 'qt_moc.o' ], \ + CompileError, cwd=qt_dir.name) - cmd = [ cpp[0] ] + ld_flags + [ 'qt_moc.o', 'qt.o', '-o', 'qt' ] + qt_ldflags - execute (cmd, LinkError, cwd=qt_dir.name) + execute ([ cpp[0] ] + ld_flags + [ 'qt_moc.o', 'qt.o', '-o', 'qt' ] + qt_ldflags, \ + LinkError, cwd=qt_dir.name) - cmd = [ os.path.join(qt_dir.name, 'qt') ] - ret = execute (cmd, RuntimeError) - report (ret[1] + '\n') + report (execute ([ os.path.join(qt_dir.name, 'qt') ], RunError)[1] + '\n') except QMakeError: @@ -1239,27 +1354,26 @@ int main() { Foo f; } error ('error linking Qt application!' + configure_log_hint) except CompileError: error ('error compiling Qt application!' + configure_log_hint) - except RuntimeError: + except RunError: error ('error running Qt application!' + configure_log_hint) except OSError as e: error ('unexpected error: ' + str(e) + configure_log_hint) - except: - error ('unexpected exception!' 
+ configure_log_hint) + except Exception as excp: + error ('unexpected exception of type ' + type(excp).__name__ + ': ' + str(excp) + configure_log_hint) if system == "darwin": - if '-Wall' in qt_cflags: qt_cflags.remove ('-Wall') - if '-W' in qt_cflags: qt_cflags.remove ('-W') + qt_cflags = [ x for x in qt_cflags if x not in [ '-Wall', '-W' ] ] # output R module: if R_module: R_cflags = get_flags (default=[ '-isystem /usr/include/R' ], env='R_CFLAGS', pkg_config_flags='--cflags libR') - R_ldflags = get_flags (default=[ '-L/usr/lib/R/lib', '-lR' ], env='R_LDFLAGS', pkg_config_flags='--libs libR') + R_ldflags = get_flags (default=[ '-L/usr/lib/R/lib', '-lR' ], env='R_LINKFLAGS', pkg_config_flags='--libs libR') compile_check ('R library', 'R', cpp_flags + R_cflags, ld_flags + R_ldflags, ''' #include @@ -1270,7 +1384,7 @@ if R_module: std::cout << R_MAJOR << "." << R_MINOR << " (r" << R_SVN_REVISION << ")\\n"; return 0; } - ''', 'R_CFLAGS', '"-isystem /usr/local/include/R"', 'R_LDFLAGS', '"-L/usr/local/R/lib -lR"') + ''', 'R_CFLAGS', '"-isystem /usr/local/include/R"', 'R_LINKFLAGS', '"-L/usr/local/R/lib -lR"') cpp_flags += R_cflags + [ '-DMRTRIX_AS_R_LIBRARY' ] ld_lib_flags += R_ldflags @@ -1332,18 +1446,18 @@ commit ('obj_suffix', obj_suffix) commit ('exe_suffix', exe_suffix) commit ('lib_prefix', lib_prefix) commit ('lib_suffix', lib_suffix) -commit ('cpp', cpp); -commit ('cpp_flags', cpp_flags); -commit ('ld', ld); -commit ('ld_flags', ld_flags); -commit ('runpath', runpath); +commit ('cpp', cpp) +commit ('cpp_flags', cpp_flags) +commit ('ld', ld) +commit ('ld_flags', ld_flags) +commit ('runpath', runpath) cache.write ('ld_enabled = ') if noshared: cache.write ('False\n') else: cache.write ('True\n') - commit ('ld_lib', ld_lib); - commit ('ld_lib_flags', ld_lib_flags); + commit ('ld_lib', ld_lib) + commit ('ld_lib_flags', ld_lib_flags) commit ('eigen_cflags', eigen_cflags) commit ('moc', moc) @@ -1361,4 +1475,3 @@ if dev: cache.close() sys.stdout.write ('ok\n\n') - diff --git a/core/adapter/base.h b/core/adapter/base.h index df5443663c..2399ff199f 100644 --- a/core/adapter/base.h +++ b/core/adapter/base.h @@ -1,22 +1,24 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #ifndef __algo_adapter_base_h__ #define __algo_adapter_base_h__ #include "image_helpers.h" +#include "types.h" namespace MR { @@ -54,7 +56,7 @@ namespace MR FORCE_INLINE default_type spacing (size_t axis) const { return parent_.spacing (axis); } FORCE_INLINE ssize_t stride (size_t axis) const { return parent_.stride (axis); } FORCE_INLINE const transform_type& transform () const { return parent_.transform(); } - FORCE_INLINE const std::map& keyval () const { return parent_.keyval(); } + FORCE_INLINE const KeyValues& keyval () const { return parent_.keyval(); } FORCE_INLINE ssize_t get_index (size_t axis) const { return parent_.index (axis); } FORCE_INLINE void move_index (size_t axis, ssize_t increment) { parent_.index (axis) += increment; } diff --git a/core/adapter/extract.h b/core/adapter/extract.h index 0bcdaca2fd..6af88e7855 100644 --- a/core/adapter/extract.h +++ b/core/adapter/extract.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __adapter_extract_h__ #define __adapter_extract_h__ diff --git a/core/adapter/gaussian1D.h b/core/adapter/gaussian1D.h index 697736b57f..a67f0f07b9 100644 --- a/core/adapter/gaussian1D.h +++ b/core/adapter/gaussian1D.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #ifndef __image_adapter_gaussian1D_h__ #define __image_adapter_gaussian1D_h__ diff --git a/core/adapter/gradient1D.h b/core/adapter/gradient1D.h index f756d1a717..cdd9f3e012 100644 --- a/core/adapter/gradient1D.h +++ b/core/adapter/gradient1D.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __image_adapter_gradient1D_h__ #define __image_adapter_gradient1D_h__ diff --git a/core/adapter/gradient3D.h b/core/adapter/gradient3D.h index fd8ef4720e..f2746c5463 100644 --- a/core/adapter/gradient3D.h +++ b/core/adapter/gradient3D.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __image_adapter_gradient3D_h__ #define __image_adapter_gradient3D_h__ diff --git a/core/adapter/jacobian.h b/core/adapter/jacobian.h index faa883a36c..0b562aa2d7 100644 --- a/core/adapter/jacobian.h +++ b/core/adapter/jacobian.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __image_adapter_jacobian_h__ #define __image_adapter_jacobian_h__ diff --git a/core/adapter/median.h b/core/adapter/median.h index 1963e1f348..e0eb649933 100644 --- a/core/adapter/median.h +++ b/core/adapter/median.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __image_adapter_median_h__ #define __image_adapter_median_h__ diff --git a/core/adapter/neighbourhood3D.h b/core/adapter/neighbourhood3D.h index 98551a8632..27c44ffac8 100644 --- a/core/adapter/neighbourhood3D.h +++ b/core/adapter/neighbourhood3D.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __adapter_neighbourhood_h__ #define __adapter_neighbourhood_h__ diff --git a/core/adapter/normalise3D.h b/core/adapter/normalise3D.h index be35da448b..ac821e150e 100644 --- a/core/adapter/normalise3D.h +++ b/core/adapter/normalise3D.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __image_adapter_normalise3D_h__ #define __image_adapter_normalise3D_h__ diff --git a/core/adapter/permute_axes.h b/core/adapter/permute_axes.h index bbc73530b1..4fbb621a08 100644 --- a/core/adapter/permute_axes.h +++ b/core/adapter/permute_axes.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __adapter_permute_axes_h__ #define __adapter_permute_axes_h__ diff --git a/core/adapter/regrid.h b/core/adapter/regrid.h new file mode 100644 index 0000000000..365c39ce98 --- /dev/null +++ b/core/adapter/regrid.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2008-2018 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * + * MRtrix3 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * For more details, see http://www.mrtrix.org/ + */ + + +#ifndef __adapter_regrid_h__ +#define __adapter_regrid_h__ + +#include "image.h" +#include "adapter/base.h" + +namespace MR +{ + namespace Adapter { + + template + class Regrid : + public Base,ImageType> + { MEMALIGN(Regrid) + public: + + using base_type = Base, ImageType>; + using value_type = typename ImageType::value_type; + + using base_type::name; + using base_type::spacing; + + template + Regrid (const ImageType& original, const VectorType& from, const VectorType& size, const value_type fill = 0) : + base_type (original), + from_ (container_cast(from)), + size_ (container_cast(size)), + index_invalid_lower_upper ([&]{ vector> v; for (size_t d = 0; d < from_.size(); ++d) { + v.push_back (vector {from_[d] < 0 ? -(ssize_t) from_[d] - 1 : -1, original.size(d) - from_[d]}); } return v; }()), + index_requires_bound_check ([&]{ vector v; for (size_t d = 0; d < from_.size(); ++d) { + v.push_back (from_[d] < 0 || size_[d] > original.size(d) - from_[d]); } return v; }()), + fill_ (fill), + transform_ (original.transform()), + index_ (ndim(), 0) { + assert(from_.size() == size_.size()); + assert(from_.size() == ndim()); + + for (size_t n = 0; n < ndim(); ++n) + if (size_[n] < 0) + throw Exception("FIXME: negative size in Regrid adapter"); + + // adjust location of origin + for (size_t j = 0; j < 3; ++j) + for (size_t i = 0; i < 3; ++i) + transform_(i,3) += from[j] * spacing(j) * transform_(i,j); + + } + + void reset () { + for (size_t n = 0; n < ndim(); ++n) + set_pos (n, 0); + } + + size_t ndim () const { return size_.size(); } + ssize_t size (size_t axis) const { return size_ [axis]; } + const transform_type& transform() const { return transform_; } + + ssize_t get_index (size_t axis) const { + return index_requires_bound_check[axis] ? index_[axis] : parent().index(axis) - from_[axis]; + } + + void move_index (size_t axis, ssize_t increment) { + if (index_requires_bound_check[axis]) { + index_[axis] += increment; + if (increment > 0) { + if (index_[axis] < index_invalid_lower_upper[axis][1]) + parent().index(axis) = index_[axis] + from_[axis]; + } else + if (index_[axis] > index_invalid_lower_upper[axis][0]) + parent().index(axis) = index_[axis] + from_[axis]; + } else + parent().index(axis) += increment; + } + + value_type value () { + for (size_t axis = 0; axis < index_.size(); ++axis) + if (index_requires_bound_check[axis] && + (index_[axis] >= index_invalid_lower_upper[axis][1] || index_[axis] <= index_invalid_lower_upper[axis][0])) + return fill_; + return parent().value(); + } + + protected: + using base_type::parent; + const vector from_, size_; + const vector> index_invalid_lower_upper; + const vector index_requires_bound_check; + const value_type fill_; + transform_type transform_; + vector index_; + }; + + } +} + +#endif + + diff --git a/core/adapter/replicate.h b/core/adapter/replicate.h index d0ae16bbec..5ddaea0822 100644 --- a/core/adapter/replicate.h +++ b/core/adapter/replicate.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
* - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __adapter_replicate_h__ #define __adapter_replicate_h__ diff --git a/core/adapter/reslice.cpp b/core/adapter/reslice.cpp index 2f4a80ee2c..ad21975f7e 100644 --- a/core/adapter/reslice.cpp +++ b/core/adapter/reslice.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "adapter/reslice.h" namespace MR diff --git a/core/adapter/reslice.h b/core/adapter/reslice.h index 282762cebc..f8536bc900 100644 --- a/core/adapter/reslice.h +++ b/core/adapter/reslice.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __adapter_reslice_h__ #define __adapter_reslice_h__ diff --git a/core/adapter/subset.h b/core/adapter/subset.h index eef7ed37f2..2d6571128a 100644 --- a/core/adapter/subset.h +++ b/core/adapter/subset.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. 
* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __adapter_subset_h__ #define __adapter_subset_h__ @@ -45,7 +46,7 @@ namespace MR for (size_t n = 0; n < ndim(); ++n) { if (size_[n] < 1) throw Exception ("FIXME: sizes requested for Subset adapter must be positive"); - if (from_[n] + size_[n] > original.size(n)) + if (from_[n] + size_[n] > original.size(n) || from_[n] < 0) throw Exception ("FIXME: dimensions requested for Subset adapter are out of bounds!"); } diff --git a/core/adapter/warp.h b/core/adapter/warp.h index 7965c2aed4..4f5bf73756 100644 --- a/core/adapter/warp.h +++ b/core/adapter/warp.h @@ -1,24 +1,26 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #ifndef __adapter_warp_h__ #define __adapter_warp_h__ #include "image.h" #include "transform.h" #include "interp/cubic.h" +#include "adapter/jacobian.h" namespace MR { @@ -55,13 +57,16 @@ namespace MR Warp (const ImageType& original, const WarpType& warp, - const value_type value_when_out_of_bounds = Interpolator::default_out_of_bounds_value()) : + const value_type value_when_out_of_bounds = Interpolator::default_out_of_bounds_value(), + const bool jacobian_modulate = false) : interp (original, value_when_out_of_bounds), warp (warp), x { 0, 0, 0 }, dim { warp.size(0), warp.size(1), warp.size(2) }, vox { warp.spacing(0), warp.spacing(1), warp.spacing(2) }, - value_when_out_of_bounds (value_when_out_of_bounds) { + value_when_out_of_bounds (value_when_out_of_bounds), + jac_modulate (jacobian_modulate), + jacobian_adapter (warp, true) { assert (warp.ndim() == 4); assert (warp.size(3) == 3); } @@ -89,7 +94,13 @@ namespace MR if (std::isnan(pos[0]) || std::isnan(pos[1]) || std::isnan(pos[2])) return value_when_out_of_bounds; interp.scanner (pos); - return interp.value(); + default_type val = interp.value(); + if (jac_modulate && val != 0.0) { + for (size_t dim = 0; dim < 3; ++dim) + jacobian_adapter.index(dim) = x[dim]; + val *= jacobian_adapter.value().template cast().determinant(); + } + return (value_type) val; } ssize_t get_index (size_t axis) const { return axis < 3 ? x[axis] : interp.index(axis); } @@ -113,7 +124,9 @@ namespace MR ssize_t x[3]; const ssize_t dim[3]; const default_type vox[3]; - value_type value_when_out_of_bounds; + const value_type value_when_out_of_bounds; + const bool jac_modulate; + Adapter::Jacobian > jacobian_adapter; }; //! @} diff --git a/core/algo/copy.h b/core/algo/copy.h index a0d16f2d5e..9c5cdf5588 100644 --- a/core/algo/copy.h +++ b/core/algo/copy.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __algo_copy_h__ #define __algo_copy_h__ diff --git a/core/algo/histogram.cpp b/core/algo/histogram.cpp index e99c89dc3d..898fd7948d 100644 --- a/core/algo/histogram.cpp +++ b/core/algo/histogram.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
* - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "algo/histogram.h" namespace MR diff --git a/core/algo/histogram.h b/core/algo/histogram.h index da946b932f..d6db876c07 100644 --- a/core/algo/histogram.h +++ b/core/algo/histogram.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __algo_histogram_h__ #define __algo_histogram_h__ @@ -20,6 +21,7 @@ #include "image_helpers.h" #include "types.h" +#include "adapter/replicate.h" #include "algo/loop.h" namespace MR @@ -157,10 +159,11 @@ namespace MR calibrate (result, image); return; } - if (!dimensions_match (image, mask)) - throw Exception ("Image and mask for histogram generation do not match"); - for (auto l = Loop(image) (image, mask); l; ++l) { - if (mask.value()) + if (!dimensions_match (image, mask, 0, 3)) + throw Exception ("Image and mask for histogram calibration do not match"); + Adapter::Replicate mask_replicate (mask, image); + for (auto l = Loop(image) (image, mask_replicate); l; ++l) { + if (mask_replicate.value()) result (image.value()); } result.finalize (image.ndim() > 3 ? image.size(3) : 1, std::is_integral::value); @@ -196,9 +199,12 @@ namespace MR { if (!mask.valid()) return generate (calibrator, image); + if (!dimensions_match (image, mask, 0, 3)) + throw Exception ("Image and mask for histogram generation do not match"); Data result (calibrator); - for (auto l = Loop(image) (image, mask); l; ++l) { - if (mask.value()) + Adapter::Replicate mask_replicate (mask, image); + for (auto l = Loop(image) (image, mask_replicate); l; ++l) { + if (mask_replicate.value()) result (typename ImageType::value_type (image.value())); } return result; diff --git a/core/algo/iterator.h b/core/algo/iterator.h index 1f541a222b..3cd170e4fb 100644 --- a/core/algo/iterator.h +++ b/core/algo/iterator.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. 
* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __algo_iterator_h__ #define __algo_iterator_h__ diff --git a/core/algo/loop.h b/core/algo/loop.h index 55cf6d7da2..73e7030056 100644 --- a/core/algo/loop.h +++ b/core/algo/loop.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __algo_loop_h__ #define __algo_loop_h__ @@ -31,7 +32,7 @@ namespace MR struct set_pos { NOMEMALIGN FORCE_INLINE set_pos (size_t axis, ssize_t index) : axis (axis), index (index) { } - template + template FORCE_INLINE void operator() (ImageType& vox) { vox.index(axis) = index; } size_t axis; ssize_t index; @@ -39,7 +40,7 @@ namespace MR struct inc_pos { NOMEMALIGN FORCE_INLINE inc_pos (size_t axis) : axis (axis) { } - template + template FORCE_INLINE void operator() (ImageType& vox) { ++vox.index(axis); } size_t axis; }; @@ -49,10 +50,10 @@ namespace MR //! \endcond /** \defgroup loop Looping functions - * + * * These functions can be used to loop over any number of axes of one of more * `ImageType`, in any specified order, within the same thread of execution - * (for multi-threaded applications, see ThreadedLoop()). + * (for multi-threaded applications, see ThreadedLoop()). * * Looping over a single axis {#loop_single_axis} * ========================== @@ -74,13 +75,13 @@ namespace MR * * - the `operator()` method of the returned object accepts any number of * `ImageType` objects, each of which will have its position incremented as - * expected at each iteration. + * expected at each iteration. * * - this returns another opaque object that will perfom the looping proper * (again, the C++11 `auto` keyword is invaluable here). 
This is assigned * to a local variable, which can be used to test for completion of the * loop (via its `operator bool()` method), and to increment the position - * of the `ImageType` objects involved (with its `operator++()` methods). + * of the `ImageType` objects involved (with its `operator++()` methods). * * * Looping with smallest stride first {#loop_smallest_stride} @@ -139,7 +140,7 @@ namespace MR * vector order = { 1, 0, 2 }; * * LoopInOrder loop (vox, order); - * for (auto i = loop.run (vox); i; ++i) + * for (auto i = loop.run (vox); i; ++i) * value += std::exp (-vox.value()); * \endcode * This will iterate over the axes in the same order as the first example @@ -155,7 +156,7 @@ namespace MR * with the looping order optimised for the \a src ImageType: * \code * LoopInOrder loop (src); - * for (auto i = loop.run(src, dest); i; ++i) + * for (auto i = loop.run(src, dest); i; ++i) * dest.value() = src.value(); * \endcode * @@ -192,7 +193,7 @@ namespace MR const size_t axis; const std::tuple vox; const ssize_t size0; - FORCE_INLINE Run (const size_t axis, const std::tuple& vox) : + FORCE_INLINE Run (const size_t axis, const std::tuple& vox) : axis (axis), vox (vox), size0 (std::get<0>(vox).size(axis)) { apply (set_pos (axis, 0), vox); } FORCE_INLINE operator bool() const { return std::get<0>(vox).index(axis) < size0; } FORCE_INLINE void operator++() const { apply (inc_pos (axis), vox); } @@ -213,7 +214,7 @@ namespace MR const size_t axis; const std::tuple vox; const ssize_t size0; - FORCE_INLINE Run (const std::string& text, const size_t axis, const std::tuple& vox) : + FORCE_INLINE Run (const std::string& text, const size_t axis, const std::tuple& vox) : progress (text, std::get<0>(vox).size(axis)), axis (axis), vox (vox), size0 (std::get<0>(vox).size(axis)) { apply (set_pos (axis, 0), vox); } FORCE_INLINE operator bool() const { return std::get<0>(vox).index(axis) < size0; } FORCE_INLINE void operator++() { apply (inc_pos (axis), vox); ++progress; } @@ -235,14 +236,14 @@ namespace MR const std::tuple vox; const ssize_t size0; bool ok; - FORCE_INLINE Run (const size_t axis_from, const size_t axis_to, const std::tuple& vox) : - from (axis_from), to (axis_to ? axis_to : std::get<0>(vox).ndim()), vox (vox), size0 (std::get<0>(vox).size(from)), ok (true) { + FORCE_INLINE Run (const size_t axis_from, const size_t axis_to, const std::tuple& vox) : + from (axis_from), to (axis_to ? 
axis_to : std::get<0>(vox).ndim()), vox (vox), size0 (std::get<0>(vox).size(from)), ok (true) { for (size_t n = from; n < to; ++n) - apply (set_pos (n, 0), vox); + apply (set_pos (n, 0), vox); } FORCE_INLINE operator bool() const { return ok; } - FORCE_INLINE void operator++() { - apply (inc_pos (from), vox); + FORCE_INLINE void operator++() { + apply (inc_pos (from), vox); if (std::get<0>(vox).index(from) < size0) return; @@ -272,7 +273,7 @@ namespace MR template struct Run : public LoopAlongAxisRange::Run { NOMEMALIGN MR::ProgressBar progress; - FORCE_INLINE Run (const std::string& text, const size_t from, const size_t to, const std::tuple& vox) : + FORCE_INLINE Run (const std::string& text, const size_t from, const size_t to, const std::tuple& vox) : LoopAlongAxisRange::Run (from, to, vox), progress (text, MR::voxel_count (std::get<0>(vox), from, to)) { } FORCE_INLINE void operator++() { LoopAlongAxisRange::Run::operator++(); ++progress; } FORCE_INLINE void operator++(int) { operator++(); } @@ -307,14 +308,14 @@ namespace MR const size_t from; const ssize_t size0; bool ok; - FORCE_INLINE Run (const std::initializer_list axes, const std::tuple& vox) : - axes (axes), vox (vox), from (*axes.begin()), size0 (std::get<0>(vox).size(from)), ok (true) { + FORCE_INLINE Run (const std::initializer_list axes, const std::tuple& vox) : + axes (axes), vox (vox), from (*axes.begin()), size0 (std::get<0>(vox).size(from)), ok (true) { for (auto axis : axes) - apply (set_pos (axis, 0), vox); + apply (set_pos (axis, 0), vox); } FORCE_INLINE operator bool() const { return ok; } - FORCE_INLINE void operator++() { - apply (inc_pos (from), vox); + FORCE_INLINE void operator++() { + apply (inc_pos (from), vox); if (std::get<0>(vox).index(from) < size0) return; @@ -326,7 +327,7 @@ namespace MR return; apply (set_pos (*axis, 0), vox); ++axis; - } + } ok = false; } FORCE_INLINE void operator++(int) { operator++(); } @@ -338,13 +339,13 @@ namespace MR struct LoopAlongStaticAxesProgress : public LoopAlongStaticAxes { NOMEMALIGN const std::string text; - LoopAlongStaticAxesProgress (const std::string& text, const std::initializer_list axes) : + LoopAlongStaticAxesProgress (const std::string& text, const std::initializer_list axes) : LoopAlongStaticAxes ({ axes }), text (text) { } template struct Run : public LoopAlongStaticAxes::Run { NOMEMALIGN MR::ProgressBar progress; - FORCE_INLINE Run (const std::string& text, const std::initializer_list axes, const std::tuple& vox) : + FORCE_INLINE Run (const std::string& text, const std::initializer_list axes, const std::tuple& vox) : LoopAlongStaticAxes::Run (axes, vox), progress (text, MR::voxel_count (std::get<0>(vox), axes)) { } FORCE_INLINE void operator++() { LoopAlongStaticAxes::Run::operator++(); ++progress; } FORCE_INLINE void operator++(int) { operator++(); } @@ -366,14 +367,14 @@ namespace MR const size_t from; const ssize_t size0; bool ok; - FORCE_INLINE Run (const vector& axes, const std::tuple& vox) : - axes (axes), vox (vox), from (axes[0]), size0 (std::get<0>(vox).size(from)), ok (true) { + FORCE_INLINE Run (const vector& axes, const std::tuple& vox) : + axes (axes), vox (vox), from (axes[0]), size0 (std::get<0>(vox).size(from)), ok (true) { for (auto axis : axes) - apply (set_pos (axis, 0), vox); + apply (set_pos (axis, 0), vox); } FORCE_INLINE operator bool() const { return ok; } - FORCE_INLINE void operator++() { - apply (inc_pos (from), vox); + FORCE_INLINE void operator++() { + apply (inc_pos (from), vox); if (std::get<0>(vox).index(from) < size0) return; 
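The Run objects in the hunks above and below implement the odometer-style iteration that the Loop() factory functions later in this file hand back. A minimal usage sketch of that interface, not part of the patch itself: the file name and axis order are hypothetical, and the call syntax assumes the loop objects' operator() overloads declared elsewhere in this header, mirroring the \code examples in the documentation block above.

#include "image.h"
#include "algo/loop.h"

using namespace MR;

double sum_in_given_order ()
{
  auto image = Image<float>::open ("in.mif");   // hypothetical input file
  vector<size_t> order { 1, 0, 2 };             // innermost (fastest-varying) axis first
  double sum = 0.0;
  for (auto l = Loop (order) (image); l; ++l)   // Loop(vector<size_t>) returns LoopAlongDynamicAxes
    sum += image.value();
  return sum;
}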
@@ -381,7 +382,7 @@ namespace MR while (axis != axes.cend()) { apply (set_pos (*(axis-1), 0), vox); apply (inc_pos (*axis), vox); - if (std::get<0>(vox).index(*axis) < std::get<0>(vox).size(*axis)) + if (std::get<0>(vox).index(*axis) < std::get<0>(vox).size(*axis)) return; ++axis; } @@ -396,13 +397,13 @@ namespace MR struct LoopAlongDynamicAxesProgress : public LoopAlongDynamicAxes { NOMEMALIGN const std::string text; - LoopAlongDynamicAxesProgress (const std::string& text, const vector& axes) : + LoopAlongDynamicAxesProgress (const std::string& text, const vector& axes) : LoopAlongDynamicAxes ({ axes }), text (text) { } template struct Run : public LoopAlongDynamicAxes::Run { NOMEMALIGN MR::ProgressBar progress; - FORCE_INLINE Run (const std::string& text, const vector& axes, const std::tuple& vox) : + FORCE_INLINE Run (const std::string& text, const vector& axes, const std::tuple& vox) : LoopAlongDynamicAxes::Run (axes, vox), progress (text, MR::voxel_count (std::get<0>(vox), axes)) { } FORCE_INLINE void operator++() { LoopAlongDynamicAxes::Run::operator++(); ++progress; } FORCE_INLINE void operator++(int) { operator++(); } @@ -414,29 +415,60 @@ namespace MR - FORCE_INLINE LoopAlongAxes Loop () { return { }; } - FORCE_INLINE LoopAlongAxesProgress Loop (const std::string& progress_message) { return { progress_message }; } - FORCE_INLINE LoopAlongSingleAxis Loop (size_t axis) { return { axis }; } - FORCE_INLINE LoopAlongSingleAxisProgress Loop (const std::string& progress_message, size_t axis) { return { progress_message, axis }; } - FORCE_INLINE LoopAlongAxisRange Loop (size_t axis_from, size_t axis_to) { return { axis_from, axis_to }; } - FORCE_INLINE LoopAlongAxisRangeProgress Loop (const std::string& progress_message, size_t axis_from, size_t axis_to) { return { progress_message, axis_from, axis_to }; } - FORCE_INLINE LoopAlongStaticAxes Loop (std::initializer_list axes) { return { axes }; } - FORCE_INLINE LoopAlongStaticAxesProgress Loop (const std::string& progress_message, std::initializer_list axes) { return { progress_message, axes }; } - FORCE_INLINE LoopAlongDynamicAxes Loop (const vector& axes) { return { axes }; } - FORCE_INLINE LoopAlongDynamicAxesProgress Loop (const std::string& progress_message, const vector& axes) { return { progress_message, axes }; } - - template - FORCE_INLINE LoopAlongDynamicAxes - Loop (const ImageType& source, size_t axis_from = 0, size_t axis_to = std::numeric_limits::max(), - typename std::enable_if::value && !std::is_same::value, int>::type = 0) { - return { Stride::order (source, axis_from, axis_to) }; - } - template - FORCE_INLINE LoopAlongDynamicAxesProgress - Loop (const std::string& progress_message, const ImageType& source, size_t axis_from = 0, size_t axis_to = std::numeric_limits::max(), - typename std::enable_if::value && !std::is_same::value, int>::type = 0) { - return { progress_message, Stride::order (source, axis_from, axis_to) }; - } + FORCE_INLINE LoopAlongAxes + Loop () + { return { }; } + + FORCE_INLINE LoopAlongAxesProgress + Loop (const std::string& progress_message) + { return { progress_message }; } + + FORCE_INLINE LoopAlongSingleAxis + Loop (size_t axis) + { return { axis }; } + + FORCE_INLINE LoopAlongSingleAxisProgress + Loop (const std::string& progress_message, size_t axis) + { return { progress_message, axis }; } + + FORCE_INLINE LoopAlongAxisRange + Loop (size_t axis_from, size_t axis_to) + { return { axis_from, axis_to }; } + + FORCE_INLINE LoopAlongAxisRangeProgress + Loop (const std::string& progress_message, 
size_t axis_from, size_t axis_to) + { return { progress_message, axis_from, axis_to }; } + + FORCE_INLINE LoopAlongStaticAxes + Loop (std::initializer_list axes) + { return { axes }; } + + FORCE_INLINE LoopAlongStaticAxesProgress + Loop (const std::string& progress_message, std::initializer_list axes) + { return { progress_message, axes }; } + + FORCE_INLINE LoopAlongDynamicAxes + Loop (const vector& axes) + { return { axes }; } + + FORCE_INLINE LoopAlongDynamicAxesProgress + Loop (const std::string& progress_message, const vector& axes) + { return { progress_message, axes }; } + + template FORCE_INLINE LoopAlongDynamicAxes + Loop (const ImageType& source, + size_t axis_from = 0, + size_t axis_to = std::numeric_limits::max(), + typename std::enable_if::value && !std::is_same::value, int>::type = 0) + { return { Stride::order (source, axis_from, axis_to) }; } + + template FORCE_INLINE LoopAlongDynamicAxesProgress + Loop (const std::string& progress_message, + const ImageType& source, + size_t axis_from = 0, + size_t axis_to = std::numeric_limits::max(), + typename std::enable_if::value && !std::is_same::value, int>::type = 0) + { return { progress_message, Stride::order (source, axis_from, axis_to) }; } //! @} diff --git a/core/algo/min_max.h b/core/algo/min_max.h index f537f479a0..2a31d31965 100644 --- a/core/algo/min_max.h +++ b/core/algo/min_max.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #ifndef __image_min_max_h__ #define __image_min_max_h__ @@ -28,17 +29,17 @@ namespace MR public: using value_type = typename ImageType::value_type; - __MinMax (value_type& overal_min, value_type& overal_max) : - overal_min (overal_min), overal_max (overal_max), - min (std::numeric_limits::infinity()), - max (-std::numeric_limits::infinity()) { - overal_min = min; - overal_max = max; + __MinMax (value_type& overall_min, value_type& overall_max) : + overall_min (overall_min), overall_max (overall_max), + min (std::numeric_limits::infinity()), + max (-std::numeric_limits::infinity()) { + overall_min = min; + overall_max = max; } ~__MinMax () { std::lock_guard lock (mutex); - overal_min = std::min (overal_min, min); - overal_max = std::max (overal_max, max); + overall_min = std::min (overall_min, min); + overall_max = std::max (overall_max, max); } void operator() (ImageType& vox) { @@ -49,8 +50,19 @@ namespace MR } } - value_type& overal_min; - value_type& overal_max; + template + void operator() (ImageType& vox, MaskType& mask) { + if (mask.value()) { + value_type val = vox.value(); + if (std::isfinite (val)) { + if (val < min) min = val; + if (val > max) max = val; + } + } + } + + value_type& overall_min; + value_type& overall_max; value_type min, max; static std::mutex mutex; @@ -60,17 +72,30 @@ namespace MR //! \endcond template - inline void min_max ( - ImageType& in, - typename ImageType::value_type& min, - typename ImageType::value_type& max, - size_t from_axis = 0, - size_t to_axis = std::numeric_limits::max()) + inline void min_max ( + ImageType& in, + typename ImageType::value_type& min, + typename ImageType::value_type& max, + size_t from_axis = 0, + size_t to_axis = std::numeric_limits::max()) { - ThreadedLoop ("finding min/max of \"" + shorten (in.name()) + "\"", in) + ThreadedLoop ("finding min/max of \"" + shorten (in.name()) + "\"", in, from_axis, to_axis) .run (__MinMax (min, max), in); } + template + inline void min_max ( + ImageType& in, + MaskType& mask, + typename ImageType::value_type& min, + typename ImageType::value_type& max, + size_t from_axis = 0, + size_t to_axis = std::numeric_limits::max()) + { + ThreadedLoop ("finding min/max of \"" + shorten (in.name()) + "\"", in, from_axis, to_axis) + .run (__MinMax (min, max), in, mask); + } + } #endif diff --git a/core/algo/neighbourhooditerator.h b/core/algo/neighbourhooditerator.h index da884ae937..6ac27c34dc 100644 --- a/core/algo/neighbourhooditerator.h +++ b/core/algo/neighbourhooditerator.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. 
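For context on the min_max.h hunks above: the added operator() overload and the mask-aware min_max() function restrict the min/max search to voxels where the mask evaluates true. A minimal usage sketch, not part of the patch; the file names are hypothetical, and the mask is assumed to be defined on the same voxel grid as the input image.

#include <iostream>
#include "image.h"
#include "algo/min_max.h"

using namespace MR;

void report_masked_range ()
{
  auto in   = Image<float>::open ("fa.mif");          // hypothetical input image
  auto mask = Image<bool>::open ("brain_mask.mif");   // hypothetical mask image
  float lo, hi;
  min_max (in, mask, lo, hi);   // only voxels where mask.value() is true contribute
  std::cout << "range within mask: [" << lo << ", " << hi << "]\n";
}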
* - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __algo_neighbourhooditerator_h__ #define __algo_neighbourhooditerator_h__ diff --git a/core/algo/random_loop.h b/core/algo/random_loop.h index 01bb2821c9..189b5e4032 100644 --- a/core/algo/random_loop.h +++ b/core/algo/random_loop.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __algo_random_loop_h__ #define __algo_random_loop_h__ diff --git a/core/algo/random_threaded_loop.h b/core/algo/random_threaded_loop.h index 6aa752973d..2bbb125c05 100644 --- a/core/algo/random_threaded_loop.h +++ b/core/algo/random_threaded_loop.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __algo_random_threaded_loop_h__ #define __algo_random_threaded_loop_h__ @@ -146,7 +147,7 @@ namespace MR template void run_outer (Functor&& functor, const double voxel_density, const vector& dimensions) { - if (Thread::number_of_threads() == 0) { + if (Thread::threads_to_execute() == 0) { for (auto i = outer_loop (iterator); i; ++i){ // std::cerr << "outer: " << str(iterator) << " " << voxel_density << " " << dimensions << std::endl; functor (iterator); diff --git a/core/algo/stochastic_threaded_loop.h b/core/algo/stochastic_threaded_loop.h index 74fd88aad0..cf3e228622 100644 --- a/core/algo/stochastic_threaded_loop.h +++ b/core/algo/stochastic_threaded_loop.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. 
* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __algo_stochastic_threaded_loop_h__ #define __algo_stochastic_threaded_loop_h__ @@ -106,7 +107,7 @@ namespace MR template void run_outer (Functor&& functor, const double voxel_density) { - if (Thread::number_of_threads() == 0) { + if (Thread::threads_to_execute() == 0) { for (auto i = outer_loop (iterator); i; ++i){ // std::cerr << "outer: " << str(iterator) << " " << voxel_density << std::endl; functor (iterator); diff --git a/core/algo/threaded_copy.h b/core/algo/threaded_copy.h index eea448b0b3..3fa9ec12b2 100644 --- a/core/algo/threaded_copy.h +++ b/core/algo/threaded_copy.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __algo_threaded_copy_h__ #define __algo_threaded_copy_h__ diff --git a/core/algo/threaded_loop.h b/core/algo/threaded_loop.h index 8ddc11ae87..a52865fe5d 100644 --- a/core/algo/threaded_loop.h +++ b/core/algo/threaded_loop.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __algo_threaded_loop_h__ #define __algo_threaded_loop_h__ @@ -303,6 +304,15 @@ namespace MR }; + inline void __manage_progress (...) { } + template + inline auto __manage_progress (const LoopType* loop, const ThreadType* threads) + -> decltype((void) (&loop->progress), void()) + { + loop->progress.run_update_thread (*threads); + } + + template struct ThreadedLoopRunOuter { MEMALIGN(ThreadedLoopRunOuter) Iterator iterator; @@ -313,13 +323,14 @@ namespace MR template void run_outer (Functor&& functor) { - if (Thread::number_of_threads() == 0) { + if (Thread::threads_to_execute() == 0) { for (auto i = outer_loop (iterator); i; ++i) functor (iterator); return; } std::mutex mutex; + ProgressBar::SwitchToMultiThreaded progress_functions; struct Shared { MEMALIGN(Shared) Iterator& iterator; @@ -346,7 +357,10 @@ namespace MR } } loop_thread = { shared, functor }; - Thread::run (Thread::multi (loop_thread), "loop threads").wait(); + auto threads = Thread::run (Thread::multi (loop_thread), "loop threads"); + + __manage_progress (&shared.loop, &threads); + threads.wait(); } @@ -376,74 +390,80 @@ namespace MR //! Multi-threaded loop object //* \sa image_thread_looping for details */ template - inline ThreadedLoopRunOuter()))> ThreadedLoop ( + inline ThreadedLoopRunOuter()))> + ThreadedLoop ( const HeaderType& source, const vector& outer_axes, - const vector& inner_axes) { - return { source, Loop (outer_axes), inner_axes }; - } + const vector& inner_axes) + { return { source, Loop (outer_axes), inner_axes }; } //! Multi-threaded loop object //* \sa image_thread_looping for details */ template - inline ThreadedLoopRunOuter()))> ThreadedLoop ( + inline ThreadedLoopRunOuter()))> + ThreadedLoop ( const HeaderType& source, const vector& axes, - size_t num_inner_axes = 1) { - return { source, Loop (get_outer_axes (axes, num_inner_axes)), get_inner_axes (axes, num_inner_axes) }; - } + size_t num_inner_axes = 1) + { return { source, Loop (get_outer_axes (axes, num_inner_axes)), get_inner_axes (axes, num_inner_axes) }; } //! Multi-threaded loop object //* \sa image_thread_looping for details */ template - inline ThreadedLoopRunOuter()))> ThreadedLoop ( + inline ThreadedLoopRunOuter()))> + ThreadedLoop ( const HeaderType& source, size_t from_axis = 0, size_t to_axis = std::numeric_limits::max(), - size_t num_inner_axes = 1) { + size_t num_inner_axes = 1) + { return { source, Loop (get_outer_axes (source, num_inner_axes, from_axis, to_axis)), get_inner_axes (source, num_inner_axes, from_axis, to_axis) }; - } + } //! Multi-threaded loop object //* \sa image_thread_looping for details */ template - inline ThreadedLoopRunOuter()))> ThreadedLoop ( + inline ThreadedLoopRunOuter()))> + ThreadedLoop ( const std::string& progress_message, const HeaderType& source, const vector& outer_axes, - const vector& inner_axes) { - return { source, Loop (progress_message, outer_axes), inner_axes }; - } + const vector& inner_axes) + { return { source, Loop (progress_message, outer_axes), inner_axes }; } //! 
Multi-threaded loop object //* \sa image_thread_looping for details */ template - inline ThreadedLoopRunOuter()))> ThreadedLoop ( + inline ThreadedLoopRunOuter()))> + ThreadedLoop ( const std::string& progress_message, const HeaderType& source, const vector& axes, - size_t num_inner_axes = 1) { + size_t num_inner_axes = 1) + { return { source, Loop (progress_message, get_outer_axes (axes, num_inner_axes)), get_inner_axes (axes, num_inner_axes) }; - } + } //! Multi-threaded loop object //* \sa image_thread_looping for details */ template - inline ThreadedLoopRunOuter()))> ThreadedLoop ( + inline ThreadedLoopRunOuter()))> + ThreadedLoop ( const std::string& progress_message, const HeaderType& source, size_t from_axis = 0, size_t to_axis = std::numeric_limits::max(), - size_t num_inner_axes = 1) { + size_t num_inner_axes = 1) + { return { source, Loop (progress_message, get_outer_axes (source, num_inner_axes, from_axis, to_axis)), get_inner_axes (source, num_inner_axes, from_axis, to_axis) }; - } + } diff --git a/core/app.cpp b/core/app.cpp index 78f80563d5..a864a2ae2d 100644 --- a/core/app.cpp +++ b/core/app.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include #include @@ -34,6 +35,11 @@ #define HELP_PURPOSE_INDENT 0, 4 #define HELP_ARG_INDENT 8, 20 #define HELP_OPTION_INDENT 2, 20 +#define HELP_EXAMPLE_INDENT 7 + +#define MRTRIX_CORE_REFERENCE "Tournier, J.-D.; Smith, R. E.; Raffelt, D.; Tabbara, R.; Dhollander, T.; Pietsch, M.; Christiaens, D.; Jeurissen, B.; Yeh, C.-H. & Connelly, A. " \ +"MRtrix3: A fast, flexible and open software framework for medical image processing and visualisation. " \ +"NeuroImage, 2019, 202, 116137" namespace MR @@ -43,6 +49,7 @@ namespace MR { Description DESCRIPTION; + ExampleList EXAMPLES; ArgumentList ARGUMENTS; OptionList OPTIONS; Description REFERENCES; @@ -50,38 +57,50 @@ namespace MR OptionGroup __standard_options = OptionGroup ("Standard options") + Option ("info", "display information messages.") - + Option ("quiet", "do not display information messages or progress status. Alternatively, this can be achieved by setting the MRTRIX_QUIET environment variable to a non-empty string.") + + Option ("quiet", "do not display information messages or progress status; " + "alternatively, this can be achieved by setting the MRTRIX_QUIET environment variable to a non-empty string.") + Option ("debug", "display debugging messages.") - + Option ("force", "force overwrite of output files. 
" - "Caution: Using the same file as input and output might cause unexpected behaviour.") + + Option ("force", "force overwrite of output files " + "(caution: using the same file as input and output might cause unexpected behaviour).") + Option ("nthreads", "use this number of threads in multi-threaded applications (set to 0 to disable multi-threading).") + Argument ("number").type_integer (0) + + Option ("config", "temporarily set the value of an MRtrix config file entry.").allow_multiple() + + Argument ("key").type_text() + + Argument ("value").type_text() + Option ("help", "display this information page and exit.") + Option ("version", "display version information and exit."); const char* AUTHOR = nullptr; const char* COPYRIGHT = - "Copyright (c) 2008-2018 the MRtrix3 contributors." - "\n\n" + "Copyright (c) 2008-2019 the MRtrix3 contributors.\n" + "\n" "This Source Code Form is subject to the terms of the Mozilla Public\n" "License, v. 2.0. If a copy of the MPL was not distributed with this\n" - "file, you can obtain one at http://mozilla.org/MPL/2.0/\n" + "file, You can obtain one at http://mozilla.org/MPL/2.0/.\n" "\n" - "MRtrix3 is distributed in the hope that it will be useful,\n" - "but WITHOUT ANY WARRANTY; without even the implied warranty\n" - "of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n" + "Covered Software is provided under this License on an \"as is\"\n" + "basis, without warranty of any kind, either expressed, implied, or\n" + "statutory, including, without limitation, warranties that the\n" + "Covered Software is free of defects, merchantable, fit for a\n" + "particular purpose or non-infringing.\n" + "See the Mozilla Public License v. 2.0 for more details.\n" "\n" - "For more details, see http://www.mrtrix.org/\n"; + "For more details, see http://www.mrtrix.org/.\n"; const char* SYNOPSIS = nullptr; std::string NAME; + std::string command_history_string; vector argument; vector option; + //ENVVAR name: MRTRIX_QUIET + //ENVVAR Do not display information messages or progress status. This has + //ENVVAR the same effect as the ``-quiet`` command-line option. int log_level = getenv("MRTRIX_QUIET") ? 
0 : 1; int exit_error_code = 0; bool fail_on_warn = false; bool terminal_use_colour = true; + const std::thread::id main_thread_ID = std::this_thread::get_id(); const char* project_version = nullptr; const char* project_build_date = nullptr; @@ -159,13 +178,17 @@ namespace MR } - std::string underline (const std::string& text) + std::string underline (const std::string& text, bool ignore_whitespace = false) { + size_t m (0); std::string retval (3*text.size(), '\0'); for (size_t n = 0; n < text.size(); ++n) { - retval[3*n] = '_'; - retval[3*n+1] = 0x08U; - retval[3*n+2] = text[n]; + if (ignore_whitespace and text[n]==' ') + retval[m++] = ' '; + else + retval[m++] = '_'; + retval[m++] = 0x08U; + retval[m++] = text[n]; } return retval; } @@ -249,7 +272,7 @@ namespace MR { if (!format) return SYNOPSIS; - return bold("SYNOPSIS") + "\n" + paragraph ("", SYNOPSIS, HELP_PURPOSE_INDENT) + "\n"; + return bold("SYNOPSIS") + "\n\n" + paragraph ("", SYNOPSIS, HELP_PURPOSE_INDENT) + "\n"; } @@ -266,16 +289,44 @@ namespace MR + bold ("COPYRIGHT") + "\n" + paragraph ("", COPYRIGHT, HELP_PURPOSE_INDENT) + "\n" + [&](){ - if (REFERENCES.size() == 0) return std::string(); std::string s = bold ("REFERENCES") + "\n"; for (size_t n = 0; n < REFERENCES.size(); ++n) s += paragraph ("", REFERENCES[n], HELP_PURPOSE_INDENT) + "\n"; + s += paragraph ("", MRTRIX_CORE_REFERENCE, HELP_PURPOSE_INDENT) + "\n"; return s; }(); } + std::string usage_syntax (int format) + { + std::string s = "USAGE"; + if (format) + s = bold (s) + "\n\n "; + else + s += ": "; + s += ( format ? underline (NAME, true) : NAME ) + " [ options ]"; + + for (size_t i = 0; i < ARGUMENTS.size(); ++i) { + + if (ARGUMENTS[i].flags & Optional) + s += " ["; + s += std::string(" ") + ARGUMENTS[i].id; + + if (ARGUMENTS[i].flags & AllowMultiple) { + if (! (ARGUMENTS[i].flags & Optional)) + s += std::string(" [ ") + ARGUMENTS[i].id; + s += " ..."; + } + if (ARGUMENTS[i].flags & (Optional | AllowMultiple)) + s += " ]"; + } + return s + "\n\n"; + } + + + @@ -294,30 +345,38 @@ namespace MR - std::string usage_syntax (int format) + Example::operator std::string () const { - std::string s = "USAGE"; + return title + ": $ " + code + " " + description; + } + + + + + std::string Example::syntax (int format) const + { + std::string s = paragraph ("", format ? underline (title + ":") + "\n" : title + ": ", HELP_PURPOSE_INDENT); + s += std::string (HELP_EXAMPLE_INDENT, ' ') + "$ " + code + "\n"; + if (description.size()) + s += paragraph ("", description, HELP_PURPOSE_INDENT); if (format) - s = bold (s) + "\n\n "; - else - s += ": "; - s += ( format ? underline (NAME) : NAME ) + " [ options ]"; + s += "\n"; + return s; + } - for (size_t i = 0; i < ARGUMENTS.size(); ++i) { - if (ARGUMENTS[i].flags & Optional) - s += "["; - s += std::string(" ") + ARGUMENTS[i].id; - if (ARGUMENTS[i].flags & AllowMultiple) { - if (! (ARGUMENTS[i].flags & Optional)) - s += std::string(" [ ") + ARGUMENTS[i].id; - s += " ..."; - } - if (ARGUMENTS[i].flags & (Optional | AllowMultiple)) - s += " ]"; - } - return s + "\n\n"; + + std::string ExampleList::syntax (int format) const + { + if (!size()) + return std::string(); + std::string s; + if (format) + s += bold ("EXAMPLE USAGES") + "\n\n"; + for (size_t i = 0; i < size(); ++i) + s += (*this)[i].syntax (format); + return s; } @@ -325,7 +384,7 @@ namespace MR std::string Argument::syntax (int format) const { - std::string retval = paragraph (( format ? underline (id) : id ), desc, HELP_ARG_INDENT); + std::string retval = paragraph (( format ? 
underline (id, true) : id ), desc, HELP_ARG_INDENT); if (format) retval += "\n"; return retval; @@ -358,12 +417,13 @@ namespace MR for (size_t i = 0; i < size(); ++i) opt += std::string (" ") + (*this)[i].id; + if (format && (flags & AllowMultiple)) + opt += " (multiple uses permitted)"; + if (format) - opt = " " + opt + "\n" + paragraph ("", desc, HELP_PURPOSE_INDENT); + opt = " " + opt + "\n" + paragraph ("", desc, HELP_PURPOSE_INDENT) + "\n"; else opt = paragraph (opt, desc, HELP_OPTION_INDENT); - if (format) - opt += "\n"; return opt; } @@ -519,6 +579,7 @@ namespace MR + usage_syntax (format) + ARGUMENTS.syntax (format) + DESCRIPTION.syntax (format) + + EXAMPLES.syntax (format) + OPTIONS.syntax (format) + __standard_options.header (format) + __standard_options.contents (format) @@ -593,6 +654,9 @@ namespace MR for (size_t i = 0; i < DESCRIPTION.size(); ++i) s += DESCRIPTION[i] + std::string("\n"); + for (size_t i = 0; i < EXAMPLES.size(); ++i) + s += std::string (EXAMPLES[i]) + std::string("\n"); + for (size_t i = 0; i < ARGUMENTS.size(); ++i) s += ARGUMENTS[i].usage(); @@ -610,7 +674,6 @@ namespace MR - std::string markdown_usage () { /* @@ -619,6 +682,7 @@ namespace MR + usage_syntax (format) + ARGUMENTS.syntax (format) + DESCRIPTION.syntax (format) + + EXAMPLES.syntax (format) + OPTIONS.syntax (format) + __standard_options.header (format) + __standard_options.contents (format) @@ -647,21 +711,25 @@ namespace MR } s += "\n\n"; - auto indent_newlines = [](std::string text) { - size_t index = 0; - while ((index = text.find("\n", index)) != std::string::npos ) - text.replace (index, 1, "
"); - return text; - }; - // Argument description: for (size_t i = 0; i < ARGUMENTS.size(); ++i) - s += std::string("- *") + ARGUMENTS[i].id + "*: " + indent_newlines (ARGUMENTS[i].desc) + "\n"; + s += std::string("- *") + ARGUMENTS[i].id + "*: " + ARGUMENTS[i].desc + "\n"; if (DESCRIPTION.size()) { s += "\n## Description\n\n"; for (size_t i = 0; i < DESCRIPTION.size(); ++i) - s += indent_newlines (DESCRIPTION[i]) + "\n\n"; + s += std::string (DESCRIPTION[i]) + "\n\n"; + } + + if (EXAMPLES.size()) { + s += "\n## Example usages\n\n"; + for (size_t i = 0; i < EXAMPLES.size(); ++i) { + s += std::string ("__") + EXAMPLES[i].title + ":__\n"; + s += std::string ("`$ ") + EXAMPLES[i].code + "`\n"; + if (EXAMPLES[i].description.size()) + s += EXAMPLES[i].description + "\n"; + s += "\n"; + } } @@ -675,7 +743,10 @@ namespace MR std::string f = std::string ("+ **-") + opt.id; for (size_t a = 0; a < opt.size(); ++a) f += std::string (" ") + opt[a].id; - f += std::string("**
") + indent_newlines (opt.desc) + "\n\n"; + f += "**"; + if (opt.flags & AllowMultiple) + f+= " *(multiple uses permitted)*"; + f += std::string("
") + opt.desc + "\n\n"; return f; }; @@ -699,11 +770,11 @@ namespace MR for (size_t i = 0; i < __standard_options.size(); ++i) s += format_option (__standard_options[i]); - if (REFERENCES.size()) { - s += std::string ("## References\n\n"); - for (size_t i = 0; i < REFERENCES.size(); ++i) - s += indent_newlines (REFERENCES[i]) + "\n\n"; - } + s += std::string ("## References\n\n"); + for (size_t i = 0; i < REFERENCES.size(); ++i) + s += std::string (REFERENCES[i]) + "\n\n"; + s += std::string (MRTRIX_CORE_REFERENCE) + "\n\n"; + s += std::string("---\n\nMRtrix ") + mrtrix_version + ", built " + build_date + "\n\n" "\n\n**Author:** " + AUTHOR + "\n\n**Copyright:** " + COPYRIGHT + "\n\n"; @@ -722,6 +793,7 @@ namespace MR + usage_syntax (format) + ARGUMENTS.syntax (format) + DESCRIPTION.syntax (format) + + EXAMPLES.syntax (format) + OPTIONS.syntax (format) + __standard_options.header (format) + __standard_options.contents (format) @@ -751,13 +823,6 @@ namespace MR } s += "\n\n"; - auto indent_newlines = [](std::string text) { - size_t index = 0; - while ((index = text.find("\n", index)) != std::string::npos ) - text.replace (index, 1, ""); - return text; - }; - // Will need more sophisticated escaping of special characters // if they start popping up in argument / option descriptions auto escape_special = [] (std::string text) { @@ -770,15 +835,35 @@ namespace MR }; // Argument description: - for (size_t i = 0; i < ARGUMENTS.size(); ++i) - s += std::string("- *") + ARGUMENTS[i].id + "*: " + escape_special (indent_newlines (ARGUMENTS[i].desc)) + "\n"; + for (size_t i = 0; i < ARGUMENTS.size(); ++i) { + auto desc = split_lines (escape_special (ARGUMENTS[i].desc), false); + s += std::string("- *") + ARGUMENTS[i].id + "*: " + desc[0]; + for (size_t n = 1; n < desc.size(); ++n) + s += " |br|\n " + desc[n]; + s+= "\n"; + } s += "\n"; if (DESCRIPTION.size()) { s += "Description\n-----------\n\n"; - for (size_t i = 0; i < DESCRIPTION.size(); ++i) - s += indent_newlines (DESCRIPTION[i]) + "\n\n"; + for (size_t i = 0; i < DESCRIPTION.size(); ++i) { + auto desc = split_lines (DESCRIPTION[i], false); + s += desc[0]; + for (size_t n = 1; n < desc.size(); ++n) + s += " |br|\n" + desc[n]; + s += "\n\n"; + } + } + + if (EXAMPLES.size()) { + s += "Example usages\n--------------\n\n"; + for (size_t i = 0; i < EXAMPLES.size(); ++i) { + s += std::string ("- *") + EXAMPLES[i].title + "*::\n\n"; + s += std::string (" $ ") + EXAMPLES[i].code + "\n\n"; + if (EXAMPLES[i].description.size()) + s += std::string (" ") + EXAMPLES[i].description + "\n\n"; + } } @@ -792,7 +877,14 @@ namespace MR std::string f = std::string ("- **-") + opt.id; for (size_t a = 0; a < opt.size(); ++a) f += std::string (" ") + opt[a].id; - f += std::string("** ") + escape_special (indent_newlines (opt.desc)) + "\n\n"; + f += std::string("** "); + if (opt.flags & AllowMultiple) + f += "*(multiple uses permitted)* "; + auto desc = split_lines (opt.desc, false); + f += escape_special (desc[0]); + for (size_t n = 1; n < desc.size(); ++n) + f += " |br|\n " + escape_special (desc[n]); + f += "\n\n"; return f; }; @@ -816,11 +908,16 @@ namespace MR for (size_t i = 0; i < __standard_options.size(); ++i) s += format_option (__standard_options[i]); - if (REFERENCES.size()) { - s += std::string ("References\n^^^^^^^^^^\n\n"); - for (size_t i = 0; i < REFERENCES.size(); ++i) - s += indent_newlines (REFERENCES[i]) + "\n\n"; + s += std::string ("References\n^^^^^^^^^^\n\n"); + for (size_t i = 0; i < REFERENCES.size(); ++i) { + auto refs = split_lines 
(REFERENCES[i], false); + s += refs[0]; + for (size_t n = 1; n < refs.size(); ++n) + s += " |br|\n " + refs[n]; + s += "\n\n"; } + s += std::string(MRTRIX_CORE_REFERENCE) + "\n\n"; + s += std::string("--------------\n\n") + "\n\n**Author:** " + (char*)AUTHOR + "\n\n**Copyright:** " + COPYRIGHT + "\n\n"; @@ -1045,10 +1142,10 @@ namespace MR count++; if (count < 1 && ! (OPTIONS[i][j].flags & Optional)) - throw Exception (std::string ("mandatory option \"") + OPTIONS[i][j].id + "\" must be specified"); + throw Exception (std::string ("mandatory option \"-") + OPTIONS[i][j].id + "\" must be specified"); if (count > 1 && ! (OPTIONS[i][j].flags & AllowMultiple)) - throw Exception (std::string ("multiple instances of option \"") + OPTIONS[i][j].id + "\" are not allowed"); + throw Exception (std::string ("multiple instances of option \"-") + OPTIONS[i][j].id + "\" are not allowed"); } } @@ -1085,8 +1182,8 @@ namespace MR throw Exception ("required input \"" + text + "\" is not a directory"); } if (i.arg->type == ArgFileOut || i.arg->type == TracksOut) { - if (text.find_last_of (PATH_SEPARATOR) == text.size() - std::string(PATH_SEPARATOR).size()) - throw Exception ("output path \"" + std::string(i) + "\" is not a valid file path (ends with \'" PATH_SEPARATOR "\")"); + if (text.find_last_of (PATH_SEPARATORS) == text.size() - 1) + throw Exception ("output path \"" + std::string(i) + "\" is not a valid file path (ends with directory path separator)"); check_overwrite (text); } if (i.arg->type == ArgDirectoryOut) @@ -1113,8 +1210,8 @@ namespace MR throw Exception ("input \"" + text + "\" for option \"-" + std::string(i.opt->id) + "\" is not a directory"); } if (arg.type == ArgFileOut || arg.type == TracksOut) { - if (text.find_last_of (PATH_SEPARATOR) == text.size() - std::string (PATH_SEPARATOR).size()) - throw Exception ("output path \"" + text + "\" for option \"-" + std::string(i.opt->id) + "\" is not a valid file path (ends with \'" PATH_SEPARATOR "\")"); + if (text.find_last_of (PATH_SEPARATORS) == text.size() - 1) + throw Exception ("output path \"" + text + "\" for option \"-" + std::string(i.opt->id) + "\" is not a valid file path (ends with directory path separator)"); check_overwrite (text); } if (arg.type == ArgDirectoryOut) @@ -1159,6 +1256,31 @@ namespace MR throw E; } + auto argv_quoted = [] (const std::string& s) -> std::string { + for (size_t i = 0; i != s.size(); ++i) { + if (!(isalnum(s[i]) || s[i] == '.' || s[i] == '_' || s[i] == '-' || s[i] == '/')) { + std::string escaped_string ("\'"); + for (auto c : s) { + switch (c) { + case '\'': escaped_string.append ("\\\'"); break; + case '\\': escaped_string.append ("\\\\"); break; + default: escaped_string.push_back (c); break; + } + } + escaped_string.push_back ('\''); + return escaped_string; + } + } + return s; + }; + command_history_string = argv[0]; + for (int n = 1; n < argc; ++n) + command_history_string += std::string(" ") + argv_quoted (argv[n]); + command_history_string += std::string (" (version=") + mrtrix_version; + if (project_version) + command_history_string += std::string (", project=") + project_version; + command_history_string += ")"; + std::locale::global (std::locale::classic()); std::setlocale (LC_ALL, "C"); diff --git a/core/app.h b/core/app.h index 4f5120c848..10442aa90a 100644 --- a/core/app.h +++ b/core/app.h @@ -1,24 +1,26 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. 
* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __app_h__ #define __app_h__ #include #include #include +#include #ifdef None # undef None @@ -43,10 +45,12 @@ namespace MR extern int log_level; extern int exit_error_code; extern std::string NAME; + extern std::string command_history_string; extern bool overwrite_files; extern void (*check_overwrite_files_func) (const std::string& name); extern bool fail_on_warn; extern bool terminal_use_colour; + extern const std::thread::id main_thread_ID; extern int argc; extern const char* const* argv; @@ -76,6 +80,40 @@ namespace MR return *this; } + Description& operator+ (const char* const text[]) { + for (const char* const* p = text; *p; ++p) + push_back (*p); + return *this; + } + + std::string syntax (int format) const; + }; + + + + //! object for storing a single example command usage + class Example { NOMEMALIGN + public: + Example (const std::string& title, + const std::string& code, + const std::string& description) : + title (title), + code (code), + description (description) { } + const std::string title, code, description; + + operator std::string () const; + std::string syntax (int format) const; + }; + + //! a class to hold the list of Example objects + class ExampleList : public vector { NOMEMALIGN + public: + ExampleList& operator+ (const Example& example) { + push_back (example); + return *this; + } + std::string syntax (int format) const; }; @@ -293,6 +331,24 @@ namespace MR */ extern Description DESCRIPTION; + //! example usages of the command + /*! This is designed to be used within each command's usage() function. Add + * various examples in order to demonstrate the different syntaxes and/or + * capabilities of the command, e.g.: + * \code + * void usage() { + * ... + * + * EXAMPLES + * + Example ("Perform the command's default functionality", + * "input2output input.mif output.mif", + * "The default usage of this command is as trivial as " + * "providing the name of the command, then the input image, " + * "then the output image."); + * } + * \endcode + */ + extern ExampleList EXAMPLES; //! the arguments expected by the command /*! This is designed to be used within each command's usage() function. Add @@ -330,6 +386,7 @@ namespace MR */ extern OptionList OPTIONS; + //! set to false if command can operate with no arguments /*! By default, the help page is shown when the command is invoked without * arguments. Some commands (e.g. MRView) can operate without arguments. */ diff --git a/core/apply.h b/core/apply.h index 995aa82347..21dac5eb24 100644 --- a/core/apply.h +++ b/core/apply.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors.
+/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __apply_h__ #define __apply_h__ diff --git a/core/axes.cpp b/core/axes.cpp index 21423f8058..4e41860b05 100644 --- a/core/axes.cpp +++ b/core/axes.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "axes.h" #include "exception.h" @@ -67,7 +68,7 @@ namespace MR - void get_permutation_to_make_axial (const transform_type& T, size_t perm[3], bool flip[3]) + void get_permutation_to_make_axial (const transform_type& T, std::array& perm, std::array& flip) { // Find which row of the transform is closest to each scanner axis decltype(T.matrix().topLeftCorner<3,3>())::Index index; diff --git a/core/axes.h b/core/axes.h index 7d49d334f8..8091ae49b1 100644 --- a/core/axes.h +++ b/core/axes.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __axes_h__ #define __axes_h__ @@ -42,7 +43,7 @@ namespace MR //! determine the axis permutations and flips necessary to make an image //! appear approximately axial - void get_permutation_to_make_axial (const transform_type& T, size_t perm[3], bool flip[3]); + void get_permutation_to_make_axial (const transform_type& T, std::array& perm, std::array& flip); diff --git a/core/bitset.cpp b/core/bitset.cpp deleted file mode 100644 index b411d1c52c..0000000000 --- a/core/bitset.cpp +++ /dev/null @@ -1,263 +0,0 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ - * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * For more details, see http://www.mrtrix.org/ - */ - - -#include "bitset.h" - - - -namespace MR { - - - - const uint8_t BitSet::masks[8] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80}; - - const char BitSet::dbyte_to_hex[16] = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'}; - - - BitSet::BitSet (const size_t b, const bool allocator) : - bits (b), - bytes ((bits + 7) / 8), - data (new uint8_t[bytes]) - { - memset (data, (allocator ? 0xFF : 0x00), bytes); - } - - - BitSet::BitSet (const BitSet& that) : - bits (that.bits), - bytes (that.bytes), - data (new uint8_t[bytes]) - { - memcpy (data, that.data, bytes); - } - - - BitSet::~BitSet() { - delete[] data; data = nullptr; - } - - - - - - void BitSet::resize (const size_t new_size, const bool allocator) - { - - size_t new_bits; - new_bits = new_size; - const size_t new_bytes = (new_bits + 7) / 8; - uint8_t* new_data = new uint8_t[new_bytes]; - if (bytes) { - if (new_bytes > bytes) { - memcpy (new_data, data, bytes); - memset (new_data + bytes, (allocator ? 0xFF : 0x00), new_bytes - bytes); - if (excess_bits()) { - const uint8_t mask = 0xFF << excess_bits(); - new_data[bytes - 1] = allocator ? (data[bytes - 1] | mask) : (data[bytes - 1] & ~mask); - } - } else { - memcpy (new_data, data, new_bytes); - } - } else { - memset (new_data, (allocator ? 0xFF : 0x00), new_bytes); - } - delete[] data; - bits = new_bits; - bytes = new_bytes; - data = new_data; - new_data = nullptr; - - } - - - void BitSet::clear (const bool allocator) - { - memset(data, (allocator ? 0xFF : 0x00), bytes); - } - - - bool BitSet::full() const - { - - const size_t bytes_to_test = (bits % 8) ? bytes - 1 : bytes; - for (size_t i = 0; i != bytes_to_test; ++i) { - if (data[i] != 0xFF) - return false; - } - if (!(bits % 8)) - return true; - - const uint8_t mask = 0xFF << excess_bits(); - if ((data[bytes - 1] | mask) != 0xFF) - return false; - return true; - - } - - - bool BitSet::empty() const - { - - const size_t bytes_to_test = (bits % 8) ? 
bytes - 1 : bytes; - for (size_t i = 0; i != bytes_to_test; ++i) { - if (data[i]) - return false; - } - if (!(bits % 8)) - return true; - - const size_t excess_bits = bits - (8 * (bytes - 1)); - const uint8_t mask = ~(0xFF << excess_bits); - if (data[bytes - 1] & mask) - return false; - return true; - - } - - - size_t BitSet::count () const - { - size_t count = 0; - for (size_t i = 0; i != bits; ++i) { - if (test (i)) - ++count; - } - return count; - } - - - - - - std::ostream& operator<< (std::ostream& stream, BitSet& d) - { - stream << "0x"; - if (d.excess_bits()) { - const uint8_t mask = 0xFF << d.excess_bits(); - stream << d.byte_to_hex (d.data[d.bytes - 1] & mask); - for (size_t i = d.bytes - 2; i--;) - stream << d.byte_to_hex (d.data[i]); - } else { - for (size_t i = d.bytes - 1; i--;) - stream << d.byte_to_hex (d.data[i]); - } - return stream; - } - - - - - - - - BitSet& BitSet::operator= (const BitSet& that) - { - delete[] data; - bits = that.bits; - bytes = that.bytes; - data = new uint8_t[bytes]; - memcpy (data, that.data, bytes); - return *this; - } - - - bool BitSet::operator== (const BitSet& that) const - { - if (bits != that.bits) - return false; - if (bits % bytes) { - if (memcmp(data, that.data, bytes - 1)) - return false; - const uint8_t mask = ~(0xFF << excess_bits()); - if ((data[bytes - 1] & mask) != (that.data[bytes - 1] & mask)) - return false; - return true; - } else { - return (!memcmp (data, that.data, bytes)); - } - } - - - bool BitSet::operator!= (const BitSet& that) const - { - return (!(*this == that)); - } - - - BitSet& BitSet::operator|= (const BitSet& that) - { - assert (bits == that.bits); - for (size_t i = 0; i != bytes; ++i) - data[i] |= that.data[i]; - return *this; - } - - - BitSet& BitSet::operator&= (const BitSet& that) - { - assert (bits == that.bits); - for (size_t i = 0; i != bytes; ++i) - data[i] &= that.data[i]; - return *this; - } - - - BitSet& BitSet::operator^= (const BitSet& that) - { - assert (bits == that.bits); - for (size_t i = 0; i != bytes; ++i) - data[i] ^= that.data[i]; - return *this; - } - - - BitSet BitSet::operator| (const BitSet& that) const - { - BitSet result (*this); - result |= that; - return result; - } - - - BitSet BitSet::operator& (const BitSet& that) const - { - BitSet result (*this); - result &= that; - return result; - } - - - BitSet BitSet::operator^ (const BitSet& that) const - { - BitSet result (*this); - result ^= that; - return result; - } - - - BitSet BitSet::operator~() const - { - BitSet result (*this); - for (size_t i = 0; i != bytes; ++i) - result.data[i] = ~data[i]; - return result; - } - - - - - - -} diff --git a/core/cmdline_option.h b/core/cmdline_option.h index bab4456b92..08c871b11e 100644 --- a/core/cmdline_option.h +++ b/core/cmdline_option.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __cmdline_option_h__ #define __cmdline_option_h__ @@ -368,7 +369,7 @@ namespace MR * OPTIONS * + Option ("roi", * "the region of interest over which to perform the processing. " - * "Mulitple such regions can be specified") + * "Multiple such regions can be specified") * .required() * .allow_multiple() * + Argument ("image").type_image_in(); diff --git a/core/command.h b/core/command.h index adc0615987..0228883cc3 100644 --- a/core/command.h +++ b/core/command.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __command_h__ #define __command_h__ diff --git a/core/datatype.cpp b/core/datatype.cpp index ee12613adb..77bcc963ec 100644 --- a/core/datatype.cpp +++ b/core/datatype.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "app.h" #include "datatype.h" diff --git a/core/datatype.h b/core/datatype.h index c2343e7e90..a7ecd6f89f 100644 --- a/core/datatype.h +++ b/core/datatype.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. 
* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __data_type_h__ #define __data_type_h__ @@ -62,6 +63,17 @@ namespace MR bool is_signed () const { return dt & Signed; } + bool is_byte_order_native () { + if (bits() <= 8) + return true; + if (!is_little_endian() && !is_big_endian()) + throw Exception ("byte order not set!"); +#ifdef MRTRIX_BYTE_ORDER_BIG_ENDIAN + return is_big_endian(); +#else + return is_little_endian(); +#endif + } bool is_little_endian () const { return dt & LittleEndian; } @@ -76,6 +88,11 @@ namespace MR const uint8_t type = dt & Type; return ((type == Float32) || (type == Float64)); } + void set_floating_point () { + if (!is_floating_point()) { + dt = Native; + } + } void set_byte_order_native () { if (dt != Bit && dt != Int8 && dt != UInt8) { if (!is_little_endian() && !is_big_endian()) { diff --git a/core/debug.h b/core/debug.h index d91930a8c0..c7f459af4d 100644 --- a/core/debug.h +++ b/core/debug.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __debug_h__ #define __debug_h__ diff --git a/src/dwi/gradient.cpp b/core/dwi/gradient.cpp similarity index 58% rename from src/dwi/gradient.cpp rename to core/dwi/gradient.cpp index 826e99d6eb..478e2ba216 100644 --- a/src/dwi/gradient.cpp +++ b/core/dwi/gradient.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
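The new DataType::is_byte_order_native() above decides, per data type, whether stored values can be read without byte swapping, comparing the stored endianness flag against the build-time MRTRIX_BYTE_ORDER_BIG_ENDIAN macro. The sketch below shows the same decision with a runtime host-endianness probe instead of the macro; it is illustrative only (the probe and the sample calls are not MRtrix3 code) and omits the "byte order not set" check.

#include <cstdint>
#include <cstring>
#include <iostream>

// Runtime probe of the host byte order (a stand-in for the build-time macro).
bool host_is_big_endian ()
{
  const uint16_t probe = 0x0102;
  uint8_t first;
  std::memcpy (&first, &probe, 1);
  return first == 0x01;    // big-endian hosts store the most significant byte first
}

// Can data stored with the given endianness be read directly, without swapping?
// Types of 8 bits or fewer carry no byte order, so they are always "native".
bool is_byte_order_native (unsigned bits, bool stored_big_endian)
{
  if (bits <= 8)
    return true;
  return stored_big_endian == host_is_big_endian();
}

int main ()
{
  std::cout << is_byte_order_native (8, true) << "\n";     // 1: single-byte type
  std::cout << is_byte_order_native (32, false) << "\n";   // 1 on little-endian hosts, 0 otherwise
}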
* - MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "dwi/gradient.h" #include "file/nifti_utils.h" @@ -24,7 +25,7 @@ namespace MR using namespace App; using namespace Eigen; - OptionGroup GradImportOptions (bool include_bvalue_scaling) + OptionGroup GradImportOptions() { OptionGroup group ("DW gradient table import options"); @@ -45,16 +46,6 @@ + Argument ("bvecs").type_file_in() + Argument ("bvals").type_file_in(); - if (include_bvalue_scaling) - group - + Option ("bvalue_scaling", - "specifies whether the b-values should be scaled by the square of " - "the corresponding DW gradient norm, as often required for " - "multi-shell or DSI DW acquisition schemes. The default action can " - "also be set in the MRtrix config file, under the BValueScaling entry. " - "Valid choices are yes/no, true/false, 0/1 (default: true).") - + Argument ("mode").type_bool(); - return group; } @@ -71,9 +62,39 @@ + Argument ("bvals_path").type_file_out(); } + Option bvalue_scaling_option = Option ("bvalue_scaling", + "enable or disable scaling of diffusion b-values by the square of the " + "corresponding DW gradient norm (see Description). " + "Valid choices are yes/no, true/false, 0/1 (default: automatic).") + + Argument ("mode").type_bool(); + + + const char* const bvalue_scaling_description ( + "The -bvalue_scaling option controls an aspect of the import of " + "diffusion gradient tables. When the input diffusion-weighting " + "direction vectors have norms that differ substantially from unity, " + "the b-values will be scaled by the square of their corresponding " + "vector norm (this is how multi-shell acquisitions are frequently " + "achieved on scanner platforms). However, in some rare instances, the " + "b-values may be correct, despite the vectors not being of unit norm " + "(or conversely, the b-values may need to be rescaled even though the " + "vectors are close to unit norm). This option allows the user to " + "control this operation and override MRtrix3's automatic detection."
+ ); + BValueScalingBehaviour get_cmdline_bvalue_scaling_behaviour () + { + auto opt = App::get_options ("bvalue_scaling"); + if (opt.empty()) + return BValueScalingBehaviour::Auto; + if (opt[0][0]) + return BValueScalingBehaviour::UserOn; + return BValueScalingBehaviour::UserOff; + } + + Eigen::MatrixXd parse_DW_scheme (const Header& header) { Eigen::MatrixXd G; @@ -178,8 +199,8 @@ namespace MR if (adjusted_transform.linear().determinant() > 0.0) bvecs.row(0) = -bvecs.row(0); - save_matrix (bvecs, bvecs_path); - save_matrix (bvals, bvals_path); + save_matrix (bvecs, bvecs_path, KeyValues(), false); + save_matrix (bvals, bvals_path, KeyValues(), false); } @@ -193,36 +214,27 @@ namespace MR - Eigen::MatrixXd get_DW_scheme (const Header& header) + Eigen::MatrixXd get_raw_DW_scheme (const Header& header) { DEBUG ("searching for suitable gradient encoding..."); using namespace App; Eigen::MatrixXd grad; - try { - const auto opt_mrtrix = get_options ("grad"); + // check whether the DW scheme has been provided via the command-line: + const auto opt_mrtrix = get_options ("grad"); + if (opt_mrtrix.size()) + grad = load_matrix<> (opt_mrtrix[0][0]); + + const auto opt_fsl = get_options ("fslgrad"); + if (opt_fsl.size()) { if (opt_mrtrix.size()) - grad = load_matrix<> (opt_mrtrix[0][0]); - const auto opt_fsl = get_options ("fslgrad"); - if (opt_fsl.size()) { - if (opt_mrtrix.size()) - throw Exception ("Please provide diffusion gradient table using either -grad or -fslgrad option (not both)"); - grad = load_bvecs_bvals (header, opt_fsl[0][0], opt_fsl[0][1]); - } - if (!opt_mrtrix.size() && !opt_fsl.size()) - grad = parse_DW_scheme (header); - } - catch (Exception& e) { - throw Exception (e, "error importing diffusion gradient table for image \"" + header.name() + "\""); + throw Exception ("Diffusion gradient table can be provided using either -grad or -fslgrad option, but NOT both"); + grad = load_bvecs_bvals (header, opt_fsl[0][0], opt_fsl[0][1]); } - if (!grad.rows()) - return grad; - - if (grad.cols() < 4) - throw Exception ("unexpected diffusion gradient table matrix dimensions"); - - INFO ("found " + str (grad.rows()) + "x" + str (grad.cols()) + " diffusion gradient table"); + // otherwise use the information from the header: + if (!opt_mrtrix.size() && !opt_fsl.size()) + grad = parse_DW_scheme (header); return grad; } @@ -231,36 +243,60 @@ namespace MR - void validate_DW_scheme (Eigen::MatrixXd& grad, const Header& header, bool nofail) - { - if (grad.rows() == 0) - throw Exception ("no diffusion encoding information found in image \"" + header.name() + "\""); - - //CONF option: BValueScaling - //CONF default: 1 (true) - //CONF Specifies whether the b-values should be scaled by the squared - //CONF norm of the gradient vectors when loading a DW gradient scheme. - //CONF This is commonly required to correctly interpret images acquired - //CONF on scanners that nominally only allow a single b-value, as the - //CONF common workaround is to scale the gradient vectors to modulate - //CONF the actual b-value. 
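With this refactor the -bvalue_scaling option is no longer bundled into GradImportOptions(); a command that wants it appends bvalue_scaling_option itself and passes the parsed behaviour to get_DW_scheme(). The following is a hedged sketch of that wiring only: the command skeleton (and the omitted SYNOPSIS/AUTHOR/ARGUMENTS boilerplate) is schematic and not copied from any actual MRtrix3 command.

#include "command.h"
#include "header.h"
#include "dwi/gradient.h"

using namespace MR;
using namespace App;

void usage ()
{
  // ... SYNOPSIS, AUTHOR and ARGUMENTS omitted from this sketch ...
  DESCRIPTION
    + DWI::bvalue_scaling_description;        // documents the automatic scaling behaviour

  OPTIONS
    + DWI::GradImportOptions()                // -grad / -fslgrad only
    + DWI::bvalue_scaling_option;             // commands now opt in to -bvalue_scaling explicitly
}

void run ()
{
  auto header = Header::open (argument[0]);

  // Auto when the option is absent, UserOn / UserOff when explicitly set to true / false:
  const auto behaviour = DWI::get_cmdline_bvalue_scaling_behaviour();

  // import, validate, normalise and (conditionally) rescale in a single call:
  const Eigen::MatrixXd grad = DWI::get_DW_scheme (header, behaviour);
}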
- bool scale_bvalues = true; - auto opt = App::get_options ("bvalue_scaling"); - if (opt.size()) - scale_bvalues = opt[0][0]; - else - scale_bvalues = File::Config::get_bool ("BValueScaling", scale_bvalues); - - if (scale_bvalues) - scale_bvalue_by_G_squared (grad); + Eigen::MatrixXd get_DW_scheme (const Header& header, BValueScalingBehaviour bvalue_scaling) + { try { + auto grad = get_raw_DW_scheme (header); check_DW_scheme (header, grad); - normalise_grad (grad); + + Eigen::Array squared_norms = grad.leftCols(3).rowwise().squaredNorm(); + // ensure interpreted directions are always normalised + // also make sure that directions of [0, 0, 0] don't affect subsequent calculations + for (ssize_t row = 0; row != grad.rows(); ++row) { + if (squared_norms[row]) + grad.row(row).template head<3>().array() /= std::sqrt(squared_norms[row]); + else + squared_norms[row] = 1.0; + } + // modulate verbosity of message & whether or not header is modified + // based on magnitude of effect of normalisation + const default_type max_log_scaling_factor = squared_norms.log().abs().maxCoeff(); + const default_type max_scaling_factor = std::exp (max_log_scaling_factor); + const bool exceeds_single_precision = max_log_scaling_factor > 1e-5; + const bool requires_bvalue_scaling = max_log_scaling_factor > 0.01; + + DEBUG ("b-value scaling: max scaling factor = exp(" + + str(max_log_scaling_factor) + ") = " + str(max_scaling_factor)); + + if (( requires_bvalue_scaling && bvalue_scaling == BValueScalingBehaviour::Auto ) || + bvalue_scaling == BValueScalingBehaviour::UserOn ) { + grad.col(3).array() *= squared_norms; + INFO ("b-values scaled by the square of DW gradient norm " + "(maximum scaling factor = " + str(max_scaling_factor) + ")"); + } + else if (bvalue_scaling == BValueScalingBehaviour::UserOff ) { + if (requires_bvalue_scaling) { + CONSOLE ("disabling b-value scaling during normalisation of DW vectors on user request " + "(maximum scaling factor would have been " + str(max_scaling_factor) + ")"); + } else { + WARN ("use of -bvalue_scaling option had no effect: gradient vector norms are all within tolerance " + "(maximum scaling factor = " + str(max_scaling_factor) + ")"); + } + } + + // write the scheme as interpreted back into the header if: + // - vector normalisation effect is large, regardless of whether or not b-value scaling was applied + // - gradient information was pulled from file + if (exceeds_single_precision || get_options ("grad").size() || get_options ("fslgrad").size()) + set_DW_scheme (const_cast (header), grad); + + INFO ("found " + str (grad.rows()) + "x" + str (grad.cols()) + " diffusion gradient table"); + return grad; } catch (Exception& e) { - if (!nofail) - throw Exception (e, "unable to get valid diffusion gradient table for image \"" + header.name() + "\""); + clear_DW_scheme (const_cast (header)); + throw Exception (e, "error importing diffusion gradient table for image \"" + header.name() + "\""); } } diff --git a/src/dwi/gradient.h b/core/dwi/gradient.h similarity index 68% rename from src/dwi/gradient.h rename to core/dwi/gradient.h index ae706a32b4..e2550d218c 100644 --- a/src/dwi/gradient.h +++ b/core/dwi/gradient.h @@ -1,35 +1,30 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __dwi_gradient_h__ #define __dwi_gradient_h__ -// These lines are to silence deprecation warnings with Eigen & GCC v5 -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#include -#pragma GCC diagnostic pop - - #include "app.h" -#include "file/path.h" -#include "file/config.h" #include "header.h" +#include "dwi/shells.h" +#include "file/config.h" +#include "file/path.h" +#include "math/condition_number.h" #include "math/sphere.h" #include "math/SH.h" -#include "dwi/shells.h" namespace MR @@ -39,25 +34,40 @@ namespace MR namespace DWI { - App::OptionGroup GradImportOptions (bool include_bvalue_scaling = true); + App::OptionGroup GradImportOptions(); App::OptionGroup GradExportOptions(); + extern App::Option bvalue_scaling_option; + extern const char* const bvalue_scaling_description; + - //! ensure each non-b=0 gradient vector is normalised to unit amplitude + + //! check that the DW scheme matches the DWI data in \a header + /*! \note This is mostly for internal use. If you have obtained the DW + * scheme using DWI::get_DW_scheme(), it should already by guaranteed to + * match the corresponding header. */ template - Eigen::MatrixXd& normalise_grad (MatrixType& grad) + inline void check_DW_scheme (const Header& header, const MatrixType& grad) { - if (grad.cols() < 3) - throw Exception ("invalid diffusion gradient table dimensions (" + str(grad.rows()) + " x " + str(grad.cols()) + ")"); - for (ssize_t i = 0; i < grad.rows(); i++) { - auto norm = grad.row(i).template head<3>().norm(); - if (norm) - grad.row(i).template head<3>().array() /= norm; + if (!grad.rows()) + throw Exception ("no valid diffusion gradient table found"); + if (grad.cols() < 4) + throw Exception ("unexpected diffusion gradient table matrix dimensions"); + + if (header.ndim() >= 4) { + if (header.size (3) != (int) grad.rows()) + throw Exception ("number of studies in base image (" + str(header.size(3)) + + ") does not match number of rows in diffusion gradient table (" + str(grad.rows()) + ")"); } - return grad; + else if (grad.rows() != 1) + throw Exception ("For images with less than four dimensions, gradient table can have one row only"); } + + + + /*! \brief convert the DW encoding matrix in \a grad into a * azimuth/elevation direction set, using only the DWI volumes as per \a * dwi */ @@ -93,8 +103,7 @@ namespace MR else // Cartesian to spherical: g = Math::Sphere::cartesian2spherical (dirs).leftCols(2); - auto v = Eigen::JacobiSVD (Math::SH::init_transform (g, lmax)).singularValues(); - return v[0] / v[v.size()-1]; + return Math::condition_number (Math::SH::init_transform (g, lmax)); } @@ -120,16 +129,21 @@ namespace MR - //! 
scale b-values by square of gradient norm - template - void scale_bvalue_by_G_squared (MatrixType& G) + namespace + { + template + std::string scheme2str (const MatrixType& G) { - INFO ("b-values will be scaled by the square of DW gradient norm"); - for (ssize_t n = 0; n < G.rows(); ++n) - if (G(n,3)) - G(n,3) *= G.row(n).template head<3>().squaredNorm(); + std::string dw_scheme; + for (ssize_t row = 0; row < G.rows(); ++row) { + std::string line = str(G(row,0), 10); + for (ssize_t col = 1; col < G.cols(); ++col) + line += "," + str(G(row,col), 10); + add_line (dw_scheme, line); + } + return dw_scheme; } - + } @@ -146,19 +160,12 @@ namespace MR header.keyval().erase (it); return; } - std::string dw_scheme; - for (ssize_t row = 0; row < G.rows(); ++row) { - std::string line; - for (ssize_t col = 0; col < G.cols(); ++col) { - line += str(G(row,col), 10); - if (col < G.cols() - 1) line += ","; - } - add_line (dw_scheme, line); + try { + check_DW_scheme (header, G); + header.keyval()["dw_scheme"] = scheme2str (G); + } catch (Exception&) { + WARN ("attempt to add non-matching DW scheme to header - ignored"); } - if (dw_scheme.size()) - header.keyval()["dw_scheme"] = dw_scheme; - else - WARN ("attempt to add empty DW scheme to header - ignored"); } @@ -167,11 +174,31 @@ namespace MR /*! extract the DW gradient encoding matrix stored in the \a header if one * is present. This is expected to be stored in the Header::keyval() * structure, under the key 'dw_scheme'. + * + * \note This is mostly for internal use. In general, you should use + * DWI::get_DW_scheme() */ Eigen::MatrixXd parse_DW_scheme (const Header& header); + //! get the DW scheme as found in the headers or supplied at the command-line + /*! return the DW gradient encoding matrix found from the command-line + * arguments, or if not provided that way, as stored in the \a header. + * This return the scheme prior to any modification or validation. + * + * \note This is mostly for internal use. In general, you should use + * DWI::get_DW_scheme() + */ + Eigen::MatrixXd get_raw_DW_scheme (const Header& header); + + + + //! clear any DW gradient encoding scheme from the header + void clear_DW_scheme (Header&); + + + //! 'stash' the DW gradient table /*! Store the _used_ DW gradient table to Header::keyval() key * 'prior_dw_scheme', and delete the key 'dw_scheme' if it exists. @@ -184,47 +211,35 @@ namespace MR template void stash_DW_scheme (Header& header, const MatrixType& grad) { - set_DW_scheme (header, grad); - auto dw_scheme = header.keyval().find ("dw_scheme"); - if (dw_scheme != header.keyval().end()) { - header.keyval()["prior_dw_scheme"] = dw_scheme->second; - header.keyval().erase (dw_scheme); - } + clear_DW_scheme (header); + if (grad.rows()) + header.keyval()["prior_dw_scheme"] = scheme2str (grad); } - //! clear any DW gradient encoding scheme from the header - void clear_DW_scheme (Header&); + enum class BValueScalingBehaviour { Auto, UserOn, UserOff }; + BValueScalingBehaviour get_cmdline_bvalue_scaling_behaviour (); - //! get the DW gradient encoding matrix - /*! attempts to find the DW gradient encoding matrix, using the following + //! get the fully-interpreted DW gradient encoding matrix + /*! 
find and validate the DW gradient encoding matrix, using the following * procedure: - * - if the -grad option has been supplied, then load the matrix assuming + * - if the \c -grad option has been supplied, then load the matrix assuming * it is in MRtrix format, and return it; - * - if the -fslgrad option has been supplied, then load and rectify the + * - if the \c -fslgrad option has been supplied, then load and rectify the * bvecs/bvals pair using load_bvecs_bvals() and return it; * - if the DW_scheme member of the header is non-empty, return it; - * - if no source of gradient encoding is found, return an empty matrix. + * - validate the DW scheme and ensure it matches the header; + * - if \a bvalue_scaling is \c true (the default), scale the b-values + * accordingly, but only if non-unit vectors are detected; + * - normalise the gradient vectors; + * - update the header with this information */ - Eigen::MatrixXd get_DW_scheme (const Header& header); + Eigen::MatrixXd get_DW_scheme (const Header& header, BValueScalingBehaviour bvalue_scaling = BValueScalingBehaviour::Auto); - //! check that the DW scheme matches the DWI data in \a header - template - inline void check_DW_scheme (const Header& header, const MatrixType& grad) - { - if (!grad.rows()) - throw Exception ("no valid diffusion gradient table found"); - - if (header.ndim() != 4) - throw Exception ("dwi image should contain 4 dimensions"); - - if (header.size (3) != (int) grad.rows()) - throw Exception ("number of studies in base image (" + str(header.size(3)) + ") does not match number of rows in diffusion gradient table (" + str(grad.rows()) + ")"); - } //! process GradExportOptions command-line options @@ -233,26 +248,6 @@ namespace MR void export_grad_commandline (const Header& header); - /*! \brief validate the DW encoding matrix \a grad and - * check that it matches the DW header in \a header - * - * This ensures the dimensions match the corresponding DWI data, applies - * b-value scaling if specified, and normalises the gradient vectors. */ - void validate_DW_scheme (Eigen::MatrixXd& grad, const Header& header, bool nofail = false); - - /*! \brief get the DW encoding matrix as per get_DW_scheme(), and - * check that it matches the DW header in \a header - * - * This is the version that should be used in any application that - * processes the DWI raw data. */ - inline Eigen::MatrixXd get_valid_DW_scheme (const Header& header, bool nofail = false) - { - auto grad = get_DW_scheme (header); - validate_DW_scheme (grad, header, nofail); - return grad; - } - - //! \brief get the matrix mapping SH coefficients to amplitudes /*! Computes the matrix mapping SH coefficients to the directions specified * in \a directions (in spherical coordinates), up to a given lmax. By @@ -302,8 +297,7 @@ namespace MR Eigen::MatrixXd mapping; do { mapping = Math::SH::init_transform (directions, lmax); - auto v = Eigen::JacobiSVD (mapping).singularValues(); - auto cond = v[0] / v[v.size()-1]; + const default_type cond = Math::condition_number (mapping); if (cond < 10.0) break; WARN ("directions are poorly distributed for lmax = " + str(lmax) + " (condition number = " + str (cond) + ")"); diff --git a/src/dwi/shells.cpp b/core/dwi/shells.cpp similarity index 94% rename from src/dwi/shells.cpp rename to core/dwi/shells.cpp index 8e41e1071f..cc132aa6e1 100644 --- a/src/dwi/shells.cpp +++ b/core/dwi/shells.cpp @@ -1,22 +1,22 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. 
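Both call sites above now delegate to Math::condition_number() instead of computing the singular-value ratio inline. As a standalone illustration (plain Eigen, with an invented helper name rather than the actual Math::condition_number() implementation), the quantity involved is simply the largest singular value divided by the smallest:

#include <iostream>
#include <Eigen/Dense>

// Condition number of a matrix: ratio of largest to smallest singular value.
// This mirrors the expression the removed code computed inline via JacobiSVD.
double condition_number_sketch (const Eigen::MatrixXd& M)
{
  Eigen::JacobiSVD<Eigen::MatrixXd> svd (M);          // singular values only, no U/V needed
  const Eigen::VectorXd& sv = svd.singularValues();   // returned in decreasing order
  return sv[0] / sv[sv.size()-1];
}

int main ()
{
  Eigen::MatrixXd M (3, 2);
  M << 1.0, 0.0,
       0.0, 0.5,
       0.0, 0.0;
  std::cout << condition_number_sketch (M) << "\n";   // prints 2 (= 1.0 / 0.5)
}

In the spherical-harmonic fitting code above, the same quantity drives the retry loop that warns when the directions are poorly distributed for a given lmax (condition number of 10 or more).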
* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - -#include "debug.h" +#include "dwi/shells.h" #include "math/math.h" -#include "dwi/shells.h" @@ -40,6 +40,13 @@ namespace MR + FORCE_INLINE default_type bvalue_epsilon () { + static const default_type value = File::Config::get_float ("BValueEpsilon", DWI_SHELLS_EPSILON); + return value; + } + + + Shell::Shell (const Eigen::MatrixXd& grad, const vector& indices) : @@ -342,6 +349,14 @@ namespace MR BitSet visited (bvals.size(), false); size_t clusterIdx = 0; + for (ssize_t ii = 0; ii != bvals.size(); ii++) { + if (bvals[ii] <= bzero_threshold()) { + visited[ii] = true; + clusterIdx = 1; + clusters[ii] = 1; + } + } + for (ssize_t ii = 0; ii != bvals.size(); ii++) { if (!visited[ii]) { @@ -383,7 +398,7 @@ namespace MR void Shells::regionQuery (const BValueList& bvals, const default_type b, vector& idx) const { for (ssize_t i = 0; i < bvals.size(); i++) { - if (abs (b - bvals[i]) < DWI_SHELLS_EPSILON) + if (bvals[i] > bzero_threshold() && abs (b - bvals[i]) < bvalue_epsilon()) idx.push_back (i); } } diff --git a/src/dwi/shells.h b/core/dwi/shells.h similarity index 83% rename from src/dwi/shells.h rename to core/dwi/shells.h index cada7c15be..ca945c6fc4 100644 --- a/src/dwi/shells.h +++ b/core/dwi/shells.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
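The Shells changes above pre-assign every volume with a b-value at or below BZeroThreshold to its own cluster, and then only group the remaining volumes whose b-values fall within BValueEpsilon of one another (the default epsilon drops from 100 to 80 in this diff). The sketch below is a simplified sequential grouping under those two thresholds; it is not the DBSCAN-style regionQuery/cluster-expansion used by the actual Shells class, and the sample b-values are invented.

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

int main ()
{
  const double bzero_threshold = 10.0;   // default BZeroThreshold
  const double bvalue_epsilon = 80.0;    // default BValueEpsilon

  const std::vector<double> bvals = { 0, 5, 1000, 1010, 2000, 1995, 0 };
  std::vector<double> shell_centres;     // running mean b-value of each shell
  std::vector<std::size_t> shell_sizes;
  std::vector<std::size_t> assignment (bvals.size());

  for (std::size_t i = 0; i != bvals.size(); ++i) {
    if (bvals[i] <= bzero_threshold) {   // b=0 volumes always form cluster 0
      assignment[i] = 0;
      continue;
    }
    std::size_t shell = 0;
    for (std::size_t s = 0; s != shell_centres.size(); ++s)
      if (std::abs (bvals[i] - shell_centres[s]) < bvalue_epsilon) { shell = s + 1; break; }
    if (!shell) {                        // no existing shell close enough: start a new one
      shell_centres.push_back (bvals[i]);
      shell_sizes.push_back (0);
      shell = shell_centres.size();
    }
    // update the running mean so later volumes compare against the shell centre
    const std::size_t s = shell - 1;
    shell_centres[s] = (shell_centres[s] * shell_sizes[s] + bvals[i]) / (shell_sizes[s] + 1);
    ++shell_sizes[s];
    assignment[i] = shell;
  }

  for (std::size_t i = 0; i != bvals.size(); ++i)
    std::cout << "b=" << bvals[i] << " -> shell " << assignment[i] << "\n";
  // b=0, 5, 0 -> shell 0; b=1000, 1010 -> shell 1; b=2000, 1995 -> shell 2
}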
*/ - #ifndef __dwi_shells_h__ #define __dwi_shells_h__ @@ -21,22 +22,23 @@ #include #include "app.h" -#include "bitset.h" #include "types.h" - #include "file/config.h" +#include "misc/bitset.h" // Don't expect these values to change depending on the particular command that is initialising the Shells class; // method should be robust to all incoming data // Maximum absolute difference in b-value for two volumes to be considered to be in the same shell -#define DWI_SHELLS_EPSILON 100 +#define DWI_SHELLS_EPSILON 80 // Minimum number of volumes within DWI_SHELL_EPSILON necessary to continue expansion of the cluster selection #define DWI_SHELLS_MIN_LINKAGE 3 // Default number of volumes necessary for a shell to be retained // (note: only applies if function reject_small_shells() is called explicitly) #define DWI_SHELLS_MIN_DIRECTIONS 6 +// Default b-value threshold for a shell to be classified as "b=0" +#define DWI_SHELLS_BZERO_THREHSOLD 10.0 @@ -45,6 +47,11 @@ //CONF Specifies the b-value threshold for determining those image //CONF volumes that correspond to b=0. +//CONF option: BValueEpsilon +//CONF default: 80.0 +//CONF Specifies the difference between b-values necessary for image +//CONF volumes to be classified as belonging to different shells. + namespace MR @@ -58,7 +65,7 @@ namespace MR extern const App::OptionGroup ShellsOption; FORCE_INLINE default_type bzero_threshold () { - static const default_type value = File::Config::get_float ("BZeroThreshold", 10.0); + static const default_type value = File::Config::get_float ("BZeroThreshold", DWI_SHELLS_BZERO_THREHSOLD); return value; } diff --git a/core/eigen_plugins/array.h b/core/eigen_plugins/array.h index b78da4a29e..2a7fec77af 100644 --- a/core/eigen_plugins/array.h +++ b/core/eigen_plugins/array.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #define MRTRIX_OP(ARG) \ template inline Array& operator ARG (const MR::Helper::ConstRow& row) {\ this->resize (row.image.size(row.axis),1); \ diff --git a/core/eigen_plugins/dense_base.h b/core/eigen_plugins/dense_base.h index f7654bb6de..af67d86ee9 100644 --- a/core/eigen_plugins/dense_base.h +++ b/core/eigen_plugins/dense_base.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #define MRTRIX_OP(ARG) \ template \ inline Derived& operator ARG (const MR::Helper::ConstRow& row) { \ diff --git a/core/eigen_plugins/matrix.h b/core/eigen_plugins/matrix.h index 46626f986f..bc2676bde2 100644 --- a/core/eigen_plugins/matrix.h +++ b/core/eigen_plugins/matrix.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - template Matrix (const MR::Helper::ConstRow& row) : Base () { operator= (row); } template Matrix (const MR::Helper::Row& row) : Base () { operator= (row); } diff --git a/core/exception.cpp b/core/exception.cpp index e68bfa035f..bf4a9367ca 100644 --- a/core/exception.cpp +++ b/core/exception.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "app.h" #include "exception.h" #include "file/config.h" diff --git a/core/exception.h b/core/exception.h index 787943f220..e89fa085ca 100644 --- a/core/exception.h +++ b/core/exception.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __mrtrix_exception_h__ #define __mrtrix_exception_h__ @@ -99,6 +100,10 @@ namespace MR void push_back (const std::string& s) { description.push_back (s); } + void push_back (const Exception& e) { + for (auto s : e.description) + push_back (s); + } static void (*display_func) (const Exception& E, int log_level); @@ -112,6 +117,12 @@ namespace MR : Exception(previous_exception, msg) {} }; + + class CancelException : public Exception { NOMEMALIGN + public: + CancelException () : Exception ("operation cancelled by user") { } + }; + void display_exception_cmdline (const Exception& E, int log_level); void cmdline_print_func (const std::string& msg); void cmdline_report_to_user_func (const std::string& msg, int type); diff --git a/core/file/config.cpp b/core/file/config.cpp index c908360948..9ea73a07e0 100644 --- a/core/file/config.cpp +++ b/core/file/config.cpp @@ -1,19 +1,22 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - +#include "app.h" #include "debug.h" +#include "header.h" #include "file/path.h" #include "file/config.h" @@ -29,18 +32,25 @@ namespace MR namespace File { - std::map Config::config; + KeyValues Config::config; + + //ENVVAR name: MRTRIX_CONFIGFILE + //ENVVAR This can be used to set the location of the system-wide + //ENVVAR configuration file. 
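The Exception changes above add a push_back() overload that appends another exception's description lines, alongside a CancelException for user-initiated aborts. Below is a standalone sketch of that message-stacking pattern using a stripped-down stand-in class; SimpleException is invented for the example and is not the MRtrix3 Exception.

#include <iostream>
#include <string>
#include <vector>

// Minimal stand-in for an exception that accumulates context as it propagates.
struct SimpleException {
  explicit SimpleException (const std::string& msg) : description { msg } { }
  // append context on top of a previously caught exception, as in
  // Exception (previous_exception, msg) / push_back (const Exception&)
  SimpleException (const SimpleException& previous, const std::string& msg) :
    description (previous.description) { description.push_back (msg); }
  std::vector<std::string> description;
};

int main ()
{
  try {
    try {
      throw SimpleException ("no diffusion encoding information found");
    }
    catch (const SimpleException& e) {
      // re-throw with additional context, preserving the original message(s)
      throw SimpleException (e, "error importing diffusion gradient table for image \"dwi.mif\"");
    }
  }
  catch (const SimpleException& e) {
    for (const auto& line : e.description)
      std::cout << line << "\n";
  }
}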
By default, this is ``/etc/mrtrix.conf``. + //ENVVAR This can be useful for deployments where access to the system's + //ENVVAR ``/etc`` folder is problematic, or to allow different versions of + //ENVVAR the software to have different configurations, etc. void Config::init () { const char* sysconf_location = getenv ("MRTRIX_CONFIGFILE"); - if (!sysconf_location) + if (!sysconf_location) sysconf_location = MRTRIX_SYS_CONFIG_FILE; if (Path::is_file (sysconf_location)) { INFO (std::string("reading config file \"") + sysconf_location + "\"..."); try { - KeyValue kv (sysconf_location); + KeyValue::Reader kv (sysconf_location); while (kv.next()) { config[kv.key()] = kv.value(); } @@ -54,7 +64,7 @@ namespace MR if (Path::is_file (path)) { INFO ("reading config file \"" + path + "\"..."); try { - KeyValue kv (path); + KeyValue::Reader kv (path); while (kv.next()) { config[kv.key()] = kv.value(); } @@ -63,6 +73,16 @@ namespace MR } else { DEBUG ("No config file found at \"" + path + "\""); } + + auto opt = App::get_options ("config"); + for (const auto& keyval : opt) + config[std::string(keyval[0])] = std::string(keyval[1]); + + //CONF option: RealignTransform + //CONF default: 1 (true) + //CONF A boolean value to indicate whether all images should be realigned + //CONF to an approximately axial orientation at load. + Header::do_realign_transform = get_bool("RealignTransform", true); } @@ -70,7 +90,7 @@ namespace MR bool Config::get_bool (const std::string& key, bool default_value) { std::string value = get (key); - if (value.empty()) + if (value.empty()) return default_value; try { return to (value); @@ -85,7 +105,7 @@ namespace MR int Config::get_int (const std::string& key, int default_value) { std::string value = get (key); - if (value.empty()) + if (value.empty()) return default_value; try { return to (value); @@ -100,7 +120,7 @@ namespace MR float Config::get_float (const std::string& key, float default_value) { std::string value = get (key); - if (value.empty()) + if (value.empty()) return default_value; try { return to (value); @@ -119,7 +139,7 @@ namespace MR if (value.size()) { try { vector V (parse_floats (value)); - if (V.size() < 3) + if (V.size() < 3) throw Exception ("malformed RGB entry \"" + value + "\" for key \"" + key + "\" in configuration file - ignored"); ret[0] = V[0]; ret[1] = V[1]; diff --git a/core/file/config.h b/core/file/config.h index e5fb2b5d27..ff8208e630 100644 --- a/core/file/config.h +++ b/core/file/config.h @@ -1,22 +1,24 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #ifndef __file_config_h__ #define __file_config_h__ #include +#include "types.h" #include "file/key_value.h" namespace MR @@ -32,33 +34,21 @@ namespace MR config[key] = value; } static std::string get (const std::string& key) { - std::map::iterator i = config.find (key); + const KeyValues::const_iterator i = config.find (key); return (i != config.end() ? i->second : ""); } static std::string get (const std::string& key, const std::string& default_value) { - std::map::iterator i = config.find (key); + KeyValues::iterator i = config.find (key); return (i != config.end() ? i->second : default_value); } - static void set_bool (const std::string& key, bool value) { - set (key, (value ? "true" : "false")); - } - static bool get_bool (const std::string& key, bool default_value); - - static void set_int (const std::string& key, int value) { - set (key, str (value)); - } - static int get_int (const std::string& key, int default_value); - - static void set_float (const std::string& key, float value) { - set (key, str (value)); - } + static bool get_bool (const std::string& key, bool default_value); + static int get_int (const std::string& key, int default_value); static float get_float (const std::string& key, float default_value); - - static void get_RGB (const std::string& key, float* ret, float default_R, float default_G, float default_B); + static void get_RGB (const std::string& key, float* ret, float default_R, float default_G, float default_B); private: - static std::map config; + static KeyValues config; }; } } diff --git a/core/file/copy.h b/core/file/copy.h index 9682f6c355..11c35c1315 100644 --- a/core/file/copy.h +++ b/core/file/copy.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_copy_h__ #define __file_copy_h__ diff --git a/core/file/dicom/csa_entry.h b/core/file/dicom/csa_entry.h index 9f5e4b8980..741205ca7c 100644 --- a/core/file/dicom/csa_entry.h +++ b/core/file/dicom/csa_entry.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
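File::Config above now stores its entries in a KeyValues map and exposes typed getters (get_bool(), get_int(), get_float()) that fall back to a caller-supplied default when a key is missing or malformed. Here is a standalone sketch of that lookup-with-default contract, using a plain std::map and std::stof rather than the MRtrix3 class and its conversion helpers:

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

using KeyValues = std::map<std::string, std::string>;

// Return config[key] converted to float, or default_value if the key is
// missing, empty, or cannot be parsed: the same contract as Config::get_float().
float get_float (const KeyValues& config, const std::string& key, float default_value)
{
  const auto it = config.find (key);
  if (it == config.end() || it->second.empty())
    return default_value;
  try {
    return std::stof (it->second);
  }
  catch (const std::exception&) {
    std::cerr << "malformed value for \"" << key << "\" - using default\n";
    return default_value;
  }
}

int main ()
{
  const KeyValues config { { "BValueEpsilon", "80.0" } };
  std::cout << get_float (config, "BValueEpsilon", 100.0f) << "\n";   // 80 (from the map)
  std::cout << get_float (config, "BZeroThreshold", 10.0f) << "\n";   // 10 (the default)
}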
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_dicom_csa_entry_h__ #define __file_dicom_csa_entry_h__ @@ -127,6 +128,18 @@ namespace MR { v[m] = NaN; } + vector get_string () const { + vector result; + const uint8_t* p = start + 84; + for (uint32_t m = 0; m < nitems; m++) { + const uint32_t length = Raw::fetch_LE (p); + std::string s (reinterpret_cast (p)+16, length); + result.push_back (std::move (s)); + p += 16 + 4*((length+3)/4); + } + return result; + } + friend std::ostream& operator<< (std::ostream& stream, const CSAEntry& item) { stream << "[CSA] " << item.name << " (" + str(item.nitems) + " items):"; const uint8_t* next = item.start + 84; diff --git a/core/file/dicom/definitions.h b/core/file/dicom/definitions.h index 965e404074..370868d1cf 100644 --- a/core/file/dicom/definitions.h +++ b/core/file/dicom/definitions.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_dicom_dict_h__ #define __file_dicom_dict_h__ diff --git a/core/file/dicom/dict.cpp b/core/file/dicom/dict.cpp index 74e306c144..48b9cafa3f 100644 --- a/core/file/dicom/dict.cpp +++ b/core/file/dicom/dict.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. 
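CSAEntry::get_string() above walks the item array of a Siemens CSA entry: each item starts with a 16-byte header whose first four bytes hold the little-endian payload length, and the payload is padded so that the next item begins on a 4-byte boundary. The sketch below reproduces that pointer arithmetic on a hand-built buffer; fetch_le32() is a stand-in for Raw::fetch_LE and the sample data is invented.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

// Read one little-endian uint32 from a possibly unaligned buffer.
uint32_t fetch_le32 (const uint8_t* p)
{
  return uint32_t(p[0]) | (uint32_t(p[1]) << 8) | (uint32_t(p[2]) << 16) | (uint32_t(p[3]) << 24);
}

// Extract the string payloads of 'nitems' CSA items starting at 'p':
// 16-byte item header (length in the first 4 bytes), then the payload,
// padded so the following item starts on a 4-byte boundary.
std::vector<std::string> get_strings (const uint8_t* p, uint32_t nitems)
{
  std::vector<std::string> result;
  for (uint32_t m = 0; m < nitems; ++m) {
    const uint32_t length = fetch_le32 (p);
    result.emplace_back (reinterpret_cast<const char*> (p) + 16, length);
    p += 16 + 4 * ((length + 3) / 4);     // skip header, payload and padding
  }
  return result;
}

int main ()
{
  // one item with the 3-byte payload "abc" (padded to 4 bytes):
  std::vector<uint8_t> buffer (16 + 4, 0);
  buffer[0] = 3;                           // little-endian length = 3
  std::memcpy (buffer.data() + 16, "abc", 3);
  for (const auto& s : get_strings (buffer.data(), 1))
    std::cout << s << "\n";                // prints "abc"
}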
* - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "file/dicom/element.h" namespace MR { diff --git a/core/file/dicom/element.cpp b/core/file/dicom/element.cpp index 4f146a65b6..34ba74f608 100644 --- a/core/file/dicom/element.cpp +++ b/core/file/dicom/element.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "file/path.h" #include "file/dicom/element.h" #include "debug.h" @@ -49,6 +50,21 @@ namespace MR { + const char* Element::type_as_str[] = { "invalid", + "integer", + "unsigned integer", + "floating-point", + "date", + "time", + "string", + "sequence", + "other", + nullptr }; + + + + + void Element::set (const std::string& filename, bool force_read, bool read_write) { @@ -383,10 +399,8 @@ namespace MR { return { printf ("%04X %04X", Raw::fetch_ (data, is_BE), Raw::fetch_ (data+2, is_BE)) }; auto strings = split (std::string (reinterpret_cast (data), size), "\\", false); - for (auto& entry: strings) { + for (auto& entry: strings) entry = strip (entry); - replace (entry, '^', ' '); - } return strings; } @@ -431,7 +445,9 @@ namespace MR { } } catch (Exception& e) { - e.display(); + DEBUG ("Error converting data at offset " + str(offset(start)) + " to " + type_as_str[type()] + " type: "); + for (auto& s : e.description) + DEBUG (s); return "invalid entry"; } return ""; @@ -504,6 +520,7 @@ namespace MR { } + } } } diff --git a/core/file/dicom/element.h b/core/file/dicom/element.h index 6f823b1799..5a006af2b0 100644 --- a/core/file/dicom/element.h +++ b/core/file/dicom/element.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. 
* - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_dicom_element_h__ #define __file_dicom_element_h__ @@ -54,11 +55,14 @@ namespace MR { class Time { NOMEMALIGN public: - Time (const std::string& entry) : - hour (to (entry.substr (0, 2))), - minute (to (entry.substr (2, 2))), - second (to (entry.substr (4, 2))), - fraction (entry.size() > 6 ? to (entry.substr (6)) : 0.0) { } + Time (const std::string& entry) : Time() { + if (entry.size() < 6) + throw Exception ("field \"" + entry + "\" is too short to be interpreted as a time"); + hour = to (entry.substr (0, 2)); + minute = to (entry.substr (2, 2)); + second = to (entry.substr (4, 2)); + fraction = entry.size() > 6 ? to (entry.substr (6)) : 0.0; + } Time (default_type i) { if (i < 0.0) @@ -197,6 +201,8 @@ namespace MR { void error_in_get (size_t idx) const; void error_in_check_size (size_t min_size, size_t actual_size) const; void report_unknown_tag_with_implicit_syntax () const; + + static const char* type_as_str[]; }; diff --git a/core/file/dicom/image.cpp b/core/file/dicom/image.cpp index 6c882e2475..6ab8272bfb 100644 --- a/core/file/dicom/image.cpp +++ b/core/file/dicom/image.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
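The Time constructor above now rejects DICOM TM strings shorter than six characters before slicing out HHMMSS and the optional fractional seconds. A standalone sketch of that parse, using std::stoi/std::stod in place of MRtrix3's conversion helpers:

#include <iostream>
#include <stdexcept>
#include <string>

struct Time { int hour, minute, second; double fraction; };

// Parse a DICOM TM value of the form HHMMSS[.frac], e.g. "142530.75".
Time parse_time (const std::string& entry)
{
  if (entry.size() < 6)
    throw std::runtime_error ("field \"" + entry + "\" is too short to be interpreted as a time");
  Time t;
  t.hour     = std::stoi (entry.substr (0, 2));
  t.minute   = std::stoi (entry.substr (2, 2));
  t.second   = std::stoi (entry.substr (4, 2));
  t.fraction = entry.size() > 6 ? std::stod (entry.substr (6)) : 0.0;
  return t;
}

int main ()
{
  const Time t = parse_time ("142530.75");
  std::cout << t.hour << ":" << t.minute << ":" << t.second << " +" << t.fraction << "\n";
  // prints 14:25:30 +0.75
}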
*/ - #include "exception.h" #include "file/path.h" #include "file/dicom/image.h" @@ -82,6 +83,9 @@ namespace MR { case 0x0081U: echo_time = item.get_float (0, echo_time); return; + case 0x0082U: + inversion_time = item.get_float (0, inversion_time); + return; case 0x0088U: slice_spacing = item.get_float (0, slice_spacing); return; @@ -311,8 +315,8 @@ namespace MR { parse_item (item); } catch (Exception& E) { - WARN (printf ("error reading tag (%04X,%04X):", item.group, item.element)); - E.display(1); + DEBUG (printf ("error reading tag (%04X,%04X):", item.group, item.element)); + E.display(3); } } @@ -332,7 +336,25 @@ namespace MR { - + namespace { + template + void phoenix_scalar (const KeyValues& keyval, const std::string& key, const Functor& functor, T& field) { + const auto it = keyval.find (key); + if (it == keyval.end()) + return; + field = functor (it->second); + } + template + void phoenix_vector (const KeyValues& keyval, const std::string& key, vector& data) { + data.clear(); + for (size_t index = 0; ; ++index) { + const auto it = keyval.find (key + "[" + str(index) + "]"); + if (it == keyval.end()) + return; + data.push_back (to (it->second)); + } + } + } void Image::decode_csa (const uint8_t* start, const uint8_t* end) @@ -342,28 +364,10 @@ namespace MR { while (entry.parse()) { if (strcmp ("B_value", entry.key()) == 0) bvalue = entry.get_float(); - else if (strcmp ("DiffusionGradientDirection", entry.key()) == 0) - entry.get_float (G); - else if (strcmp ("NumberOfImagesInMosaic", entry.key()) == 0) - images_in_mosaic = entry.get_int(); - else if (strcmp ("SliceNormalVector", entry.key()) == 0) - entry.get_float (orientation_z); - else if (strcmp ("PhaseEncodingDirectionPositive", entry.key()) == 0) - pe_sign = (entry.get_int() > 0) ? 1 : -1; else if (strcmp ("BandwidthPerPixelPhaseEncode", entry.key()) == 0) bandwidth_per_pixel_phase_encode = entry.get_float(); - else if (strcmp ("MosaicRefAcqTimes", entry.key()) == 0) { - mosaic_slices_timing.resize (entry.num_items()); - entry.get_float (mosaic_slices_timing); - } - else if (strcmp ("TimeAfterStart", entry.key()) == 0) - time_after_start = entry.get_float(); - else if (strcmp ("ImagePositionPatient", entry.key()) == 0) { - Eigen::Matrix v; - entry.get_float (v); - if (v.allFinite()) - position_vector = v; - } + else if (strcmp ("DiffusionGradientDirection", entry.key()) == 0) + entry.get_float (G); else if (strcmp ("ImageOrientationPatient", entry.key()) == 0) { Eigen::Matrix v; entry.get_float (v); @@ -374,6 +378,53 @@ namespace MR { orientation_y.normalize(); } } + else if (strcmp ("ImagePositionPatient", entry.key()) == 0) { + Eigen::Matrix v; + entry.get_float (v); + if (v.allFinite()) + position_vector = v; + } + else if (strcmp ("MosaicRefAcqTimes", entry.key()) == 0) { + mosaic_slices_timing.resize (entry.num_items()); + entry.get_float (mosaic_slices_timing); + } + else if (strcmp ("MrPhoenixProtocol", entry.key()) == 0) { + const vector phoenix = entry.get_string(); + const auto keyval = read_csa_ascii (phoenix); + phoenix_scalar (keyval, + "sDiffusion.dsScheme", + [] (const std::string& value) -> size_t { return to (value); }, + bipolar_flag); + phoenix_scalar (keyval, + "sKSpace.ucPhasePartialFourier", + [] (const std::string& value) -> default_type + { + switch (to (value)) { + case 1: return 0.5; + case 2: return 0.675; + case 4: return 0.75; + case 8: return 0.875; + default: return NaN; + } + }, + partial_fourier); + phoenix_scalar (keyval, + "ucReadOutMode", + [] (const std::string& value) -> size_t { return 
to (value); }, + readoutmode_flag); + phoenix_vector (keyval, + "adFlipAngleDegree", + flip_angles); + + } + else if (strcmp ("NumberOfImagesInMosaic", entry.key()) == 0) + images_in_mosaic = entry.get_int(); + else if (strcmp ("PhaseEncodingDirectionPositive", entry.key()) == 0) + pe_sign = (entry.get_int() > 0) ? 1 : -1; + else if (strcmp ("SliceNormalVector", entry.key()) == 0) + entry.get_float (orientation_z); + else if (strcmp ("TimeAfterStart", entry.key()) == 0) + time_after_start = entry.get_float(); } if (G[0] && bvalue) @@ -383,6 +434,43 @@ namespace MR { + KeyValues Image::read_csa_ascii (const vector& data) + { + + auto split_keyval = [] (const std::string& s) -> std::pair { + const size_t delimiter = s.find_first_of ("="); + if (delimiter == std::string::npos) + return std::make_pair (std::string(), std::string()); + return std::make_pair (strip (s.substr(0, delimiter)), strip (s.substr(delimiter+1)) ); + }; + + KeyValues result; + for (const auto& item : data) { + const auto lines = split_lines (item); + bool do_read = false; + for (const auto& line : lines) { + if (line.substr(0, 17) == "### ASCCONV BEGIN") { + do_read = true; + continue; + } + if (line.substr(0, 15) == "### ASCCONV END") { + do_read = false; + continue; + } + if (do_read) { + const auto keyval = split_keyval (line); + result[keyval.first] = keyval.second; + } + } + if (do_read) { + WARN("Siemens CSA ASCII section malformed (not appropriately ended)"); + } + } + return result; + } + + + std::ostream& operator<< (std::ostream& stream, const Frame& item) { stream << ( item.instance == UINT_MAX ? 0 : item.instance ) << "#" @@ -561,7 +649,9 @@ namespace MR { } } - return pe_scheme; + if (pe_scheme.col(3).sum()) + return pe_scheme; + return pe_scheme.leftCols(3); } diff --git a/core/file/dicom/image.h b/core/file/dicom/image.h index 608a3f9f86..78982e01aa 100644 --- a/core/file/dicom/image.h +++ b/core/file/dicom/image.h @@ -1,24 +1,26 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #ifndef __file_dicom_image_h__ #define __file_dicom_image_h__ #include #include "datatype.h" +#include "types.h" #include "file/dicom/element.h" namespace MR { @@ -46,8 +48,9 @@ namespace MR { transfer_syntax_supported = true; pe_axis = 3; pe_sign = 0; - pixel_bandwidth = bandwidth_per_pixel_phase_encode = echo_time = repetition_time = flip_angle = time_after_start = NaN; + pixel_bandwidth = bandwidth_per_pixel_phase_encode = echo_time = inversion_time = repetition_time = flip_angle = partial_fourier = time_after_start = NaN; echo_train_length = 0; + bipolar_flag = readoutmode_flag = 0; } size_t acq_dim[2], dim[2], series_num, instance, acq, sequence; @@ -59,9 +62,11 @@ namespace MR { size_t pe_axis; int pe_sign; Time acquisition_time; - default_type pixel_bandwidth, bandwidth_per_pixel_phase_encode, echo_time, repetition_time, flip_angle, time_after_start; + default_type pixel_bandwidth, bandwidth_per_pixel_phase_encode, echo_time, inversion_time, repetition_time, flip_angle, partial_fourier, time_after_start; size_t echo_train_length; + size_t bipolar_flag, readoutmode_flag; vector index; + vector flip_angles; bool operator< (const Frame& frame) const { if (series_num != frame.series_num) @@ -70,7 +75,7 @@ namespace MR { return image_type < frame.image_type; if (acq != frame.acq) return acq < frame.acq; - if (std::isfinite (distance) && std::isfinite (frame.distance) && distance != frame.distance) + if (std::isfinite (distance) && std::isfinite (frame.distance) && distance != frame.distance) return distance < frame.distance; for (size_t n = index.size(); n--;) if (index[n] != frame.index[n]) @@ -88,7 +93,7 @@ namespace MR { if (!std::isfinite (orientation_z[0])) orientation_z = orientation_x.cross (orientation_y); else { - if (!orientation_x.allFinite() || !orientation_y.allFinite()) + if (!orientation_x.allFinite() || !orientation_y.allFinite()) throw Exception ("slice orientation information missing from DICOM header!"); Eigen::Vector3 normal = orientation_x.cross (orientation_y); if (normal.dot (orientation_z) < 0.0) @@ -97,7 +102,7 @@ namespace MR { orientation_z = normal; } - if (!position_vector.allFinite()) + if (!position_vector.allFinite()) throw Exception ("slice position information missing from DICOM header!"); orientation_z.normalize(); @@ -143,12 +148,14 @@ namespace MR { void read (); void parse_item (Element& item, const std::string& dirname = ""); void decode_csa (const uint8_t* start, const uint8_t* end); + KeyValues read_csa_ascii (const vector& data); bool operator< (const Image& ima) const { return Frame::operator< (ima); } friend std::ostream& operator<< (std::ostream& stream, const Image& item); + }; diff --git a/core/file/dicom/mapper.cpp b/core/file/dicom/mapper.cpp index 8f635f0f6f..c534ce246c 100644 --- a/core/file/dicom/mapper.cpp +++ b/core/file/dicom/mapper.cpp @@ -1,24 +1,26 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "header.h" #include "phase_encoding.h" #include "image_io/default.h" #include "image_io/mosaic.h" +#include "image_io/variable_scaling.h" #include "file/dicom/mapper.h" #include "file/dicom/image.h" #include "file/dicom/series.h" @@ -50,7 +52,7 @@ namespace MR { vector frames; // loop over series list: - for (const auto series_it : series) { + for (const auto& series_it : series) { try { series_it->read(); } @@ -118,12 +120,71 @@ namespace MR { const Image& image (*(*series[0])[0]); const Frame& frame (*frames[0]); - if (std::isfinite (image.echo_time)) - H.keyval()["EchoTime"] = str (0.001 * image.echo_time, 6); - if (std::isfinite (image.flip_angle)) - H.keyval()["FlipAngle"] = str (image.flip_angle, 6); - if (std::isfinite (image.repetition_time)) - H.keyval()["RepetitionTime"] = str (0.001 * image.repetition_time, 6); + // If the value of the parameter changes for every volume, + // write the values as a comma-separated list to the header + auto import_parameter = [&] (const std::string key, + std::function functor, + const default_type multiplier) -> void + { + vector values; + for (const auto f : frames) { + const default_type value = functor (f); + if (!std::isfinite (value)) + return; + const std::string value_string = str(multiplier * value, 6); + if (values.empty() || value_string != values.back()) + values.push_back (value_string); + } + if (values.size()) + H.keyval()[key] = join(values, ","); + }; + import_parameter ("EchoTime", + [] (Frame* f) -> default_type { return f->echo_time; }, + 0.001); + import_parameter ("FlipAngle", + [] (Frame* f) -> default_type { return f->flip_angle; }, + 1.0); + import_parameter ("InversionTime", + [] (Frame* f) -> default_type { return f->inversion_time; }, + 0.001); + import_parameter ("PartialFourier", + [] (Frame* f) -> default_type { return f->partial_fourier; }, + 1.0); + import_parameter ("PixelBandwidth", + [] (Frame* f) -> default_type { return f->pixel_bandwidth; }, + 1.0); + import_parameter ("RepetitionTime", + [] (Frame* f) -> default_type { return f->repetition_time; }, + 0.001); + + if (std::isfinite (frame.bvalue)) { + if (frame.bipolar_flag) { + switch (frame.bipolar_flag) { + case 1: H.keyval()["DiffusionScheme"] = "Bipolar"; break; + case 2: H.keyval()["DiffusionScheme"] = "Monopolar"; break; + default: WARN("Unsupported DWI polarity scheme flag (" + str(frame.bipolar_flag) + ")"); + } + } else if (frame.readoutmode_flag) { + switch (frame.readoutmode_flag) { + case 1: H.keyval()["DiffusionScheme"] = "Monopolar"; break; + case 2: H.keyval()["DiffusionScheme"] = "Bipolar"; break; + default: WARN("Unsupported DWI readout mode flag (" + str(frame.readoutmode_flag) + ")"); + } + } + } + + if (frame.flip_angles.size() > 1) { + // Are all entries in the vector the same? 
+            bool all_equal = true;
+            for (size_t index = 1; index != frame.flip_angles.size(); ++index) {
+              if (frame.flip_angles[index] != frame.flip_angles[0]) {
+                all_equal = false;
+                break;
+              }
+            }
+            if (!all_equal)
+              H.keyval()["FlipAngle"] = join (frame.flip_angles, ",");
+          }
          size_t nchannels = image.frames.size() ? 1 : image.data_size / (frame.dim[0] * frame.dim[1] * (frame.bits_alloc/8));
          if (nchannels > 1)
@@ -200,24 +261,49 @@
            H.keyval()["dw_scheme"] = dw_scheme;
          }
-          PhaseEncoding::set_scheme (H, Frame::get_PE_scheme (frames, dim[1]));
+          try {
+            PhaseEncoding::set_scheme (H, Frame::get_PE_scheme (frames, dim[1]));
+          } catch (Exception& e) {
+            e.display (3);
+            WARN ("Malformed phase encoding information; ignored");
+          }
-          for (size_t n = 1; n < frames.size(); ++n) // check consistency of data scaling:
+          bool inconsistent_scaling = false;
+          for (size_t n = 1; n < frames.size(); ++n) { // check consistency of data scaling:
            if (frames[n]->scale_intercept != frames[n-1]->scale_intercept ||
-                frames[n]->scale_slope != frames[n-1]->scale_slope)
-              throw Exception ("unable to load series due to inconsistent data scaling between DICOM images");
-
+                frames[n]->scale_slope != frames[n-1]->scale_slope) {
+              if (image.images_in_mosaic)
+                throw Exception ("unable to load series due to inconsistent data scaling between DICOM mosaic frames");
+              inconsistent_scaling = true;
+              INFO ("DICOM images contain inconsistent scaling - data will be rescaled and stored in 32-bit floating-point format");
+              break;
+            }
+          }
          // Slice timing may come from a few different potential sources
-          vector<default_type> slices_timing;
+          vector<std::string> slices_timing_str;
+          vector<default_type> slices_timing_float;
          if (image.images_in_mosaic) {
            if (image.mosaic_slices_timing.size() < image.images_in_mosaic) {
              WARN ("Number of entries in mosaic slice timing (" + str(image.mosaic_slices_timing.size()) + ") is smaller than number of images in mosaic (" + str(image.images_in_mosaic) + "); omitting");
            } else {
              DEBUG ("Taking slice timing information from CSA mosaic info");
              // CSA mosaic defines these in ms; we want them in s
-              for (size_t n = 0; n < image.images_in_mosaic; ++n)
-                slices_timing.push_back (0.001 * image.mosaic_slices_timing[n]);
+              // We also want to avoid floating-point precision issues resulting from
+              // base-10 scaling
+              for (size_t n = 0; n < image.images_in_mosaic; ++n) {
+                slices_timing_float.push_back (0.001 * image.mosaic_slices_timing[n]);
+                std::string temp = str(int(10.0 * image.mosaic_slices_timing[n]));
+                const bool neg = image.mosaic_slices_timing[n] < 0.0;
+                if (neg)
+                  temp.erase (temp.begin());
+                while (temp.size() < 5)
+                  temp.insert (temp.begin(), '0');
+                temp.insert (temp.begin() + temp.size() - 4, '.');
+                if (neg)
+                  temp.insert (temp.begin(), '-');
+                slices_timing_str.push_back (temp);
+              }
            }
          } else if (std::isfinite (frame.time_after_start)) {
            DEBUG ("Taking slice timing information from CSA TimeAfterStart field");
@@ -225,20 +311,21 @@
            for (size_t n = 0; n != dim[1]; ++n)
              min_time_after_start = std::min (min_time_after_start, frames[n]->time_after_start);
            for (size_t n = 0; n != dim[1]; ++n)
-              slices_timing.push_back (frames[n]->time_after_start - min_time_after_start);
-            H.keyval()["SliceTiming"] = join (slices_timing, ",");
+              slices_timing_float.push_back (frames[n]->time_after_start - min_time_after_start);
          } else if (std::isfinite (static_cast<default_type>(frame.acquisition_time))) {
            DEBUG ("Estimating slice timing from DICOM AcquisitionTime field");
            default_type min_acquisition_time = std::numeric_limits<default_type>::infinity();
for (size_t n = 0; n != dim[1]; ++n) min_acquisition_time = std::min (min_acquisition_time, default_type(frames[n]->acquisition_time)); for (size_t n = 0; n != dim[1]; ++n) - slices_timing.push_back (default_type(frames[n]->acquisition_time) - min_acquisition_time); + slices_timing_float.push_back (default_type(frames[n]->acquisition_time) - min_acquisition_time); } - if (slices_timing.size()) { - const size_t slices_acquired_at_zero = std::count (slices_timing.begin(), slices_timing.end(), 0.0f); + if (slices_timing_float.size()) { + const size_t slices_acquired_at_zero = std::count (slices_timing_float.begin(), slices_timing_float.end(), 0.0f); if (slices_acquired_at_zero < (image.images_in_mosaic ? image.images_in_mosaic : dim[1])) { - H.keyval()["SliceTiming"] = join (slices_timing, ","); + H.keyval()["SliceTiming"] = slices_timing_str.size() ? + join (slices_timing_str, ",") : + join (slices_timing_float, ","); H.keyval()["MultibandAccelerationFactor"] = str (slices_acquired_at_zero); H.keyval()["SliceEncodingDirection"] = "k"; } else { @@ -284,7 +371,21 @@ namespace MR { io_handler.reset (new MR::ImageIO::Mosaic (H, frame.dim[0], frame.dim[1], H.size (0), H.size (1), H.size (2))); - } else { + } + else if (inconsistent_scaling) { + + H.reset_intensity_scaling(); + H.datatype() = DataType::Float32; + H.datatype().set_byte_order_native(); + + MR::ImageIO::VariableScaling* handler = new MR::ImageIO::VariableScaling (H); + + for (size_t n = 0; n < frames.size(); ++n) + handler->scale_factors.push_back ({ frames[n]->scale_intercept, frames[n]->scale_slope }); + + io_handler.reset (handler); + } + else { io_handler.reset (new MR::ImageIO::Default (H)); diff --git a/core/file/dicom/mapper.h b/core/file/dicom/mapper.h index 027a2feced..b50d49156a 100644 --- a/core/file/dicom/mapper.h +++ b/core/file/dicom/mapper.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_dicom_mapper_h__ #define __file_dicom_mapper_h__ diff --git a/core/file/dicom/patient.cpp b/core/file/dicom/patient.cpp index 3563dca751..74dac79d4a 100644 --- a/core/file/dicom/patient.cpp +++ b/core/file/dicom/patient.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
* - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "file/dicom/patient.h" #include "file/dicom/study.h" #include "file/dicom/series.h" diff --git a/core/file/dicom/patient.h b/core/file/dicom/patient.h index 4d61b61daa..ddc4c6c948 100644 --- a/core/file/dicom/patient.h +++ b/core/file/dicom/patient.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_dicom_patient_h__ #define __file_dicom_patient_h__ diff --git a/core/file/dicom/quick_scan.cpp b/core/file/dicom/quick_scan.cpp index 8c21cb0830..dfb6ef6910 100644 --- a/core/file/dicom/quick_scan.cpp +++ b/core/file/dicom/quick_scan.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "file/dicom/quick_scan.h" #include "file/dicom/definitions.h" #include "file/dicom/element.h" @@ -25,7 +26,7 @@ namespace MR { - bool QuickScan::read (const std::string& file_name, bool print_DICOM_fields, bool print_CSA_fields, bool force_read) + bool QuickScan::read (const std::string& file_name, bool print_DICOM_fields, bool print_CSA_fields, bool print_Phoenix, bool force_read) { filename = file_name; modality.clear(); @@ -82,7 +83,7 @@ namespace MR { if (print_DICOM_fields) print (str(item)); - if (print_CSA_fields && item.group == 0x0029U) { + if ((print_CSA_fields || print_Phoenix) && item.group == 0x0029U) { if (item.element == 0x1010U || item.element == 0x1020U || item.element == 0x1110U || @@ -90,8 +91,28 @@ namespace MR { item.element == 0x1210U || item.element == 0x1220U) { CSAEntry entry (item.data, item.data + item.size); - while (entry.parse()) - print (str (entry)); + while (entry.parse()) { + const bool is_phoenix = (strcmp ("MrPhoenixProtocol", entry.key()) == 0); + if ((print_Phoenix && is_phoenix) || (print_CSA_fields && !is_phoenix)) { + if (print_CSA_fields) { + print (str (entry)); + } else { + const auto data = entry.get_string(); + for (const auto& entry : data) + print (entry); + } + } else if (print_CSA_fields && is_phoenix) { + print (std::string("[CSA] ") + entry.key() + " (" + str(entry.num_items()) + " items): <"); + const auto data = entry.get_string(); + size_t line_count = 0; + for (const auto& entry : data) { + if (entry.size()) + line_count += 1; + line_count += std::count (entry.begin(), entry.end(), '\n'); + } + print (str(line_count) + " text lines>\n"); + } + } } } } diff --git a/core/file/dicom/quick_scan.h b/core/file/dicom/quick_scan.h index 14ff5d1952..915a081bff 100644 --- a/core/file/dicom/quick_scan.h +++ b/core/file/dicom/quick_scan.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #ifndef __file_dicom_quick_scan_h__ #define __file_dicom_quick_scan_h__ @@ -26,7 +27,7 @@ namespace MR { class QuickScan { NOMEMALIGN public: - bool read (const std::string& file_name, bool print_DICOM_fields = false, bool print_CSA_fields = false, bool force_read = false); + bool read (const std::string& file_name, bool print_DICOM_fields = false, bool print_CSA_fields = false, bool print_Phoenix = false, bool force_read = false); std::string filename, modality; std::string patient, patient_ID, patient_DOB; diff --git a/core/file/dicom/select_cmdline.cpp b/core/file/dicom/select_cmdline.cpp index ae29635a7f..82faaaac45 100644 --- a/core/file/dicom/select_cmdline.cpp +++ b/core/file/dicom/select_cmdline.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "file/dicom/image.h" #include "file/dicom/series.h" #include "file/dicom/study.h" @@ -30,10 +31,70 @@ namespace MR { if (tree.size() == 0) throw Exception ("DICOM tree its empty"); + //ENVVAR name: DICOM_PATIENT + //ENVVAR when reading DICOM data, match the PatientName entry against + //ENVVAR the string provided + const char* patient_from_env = getenv ("DICOM_PATIENT"); + + //ENVVAR name: DICOM_ID + //ENVVAR when reading DICOM data, match the PatientID entry against + //ENVVAR the string provided + const char* patid_from_env = getenv ("DICOM_ID"); + + //ENVVAR name: DICOM_STUDY + //ENVVAR when reading DICOM data, match the StudyName entry against + //ENVVAR the string provided + const char* study_from_env = getenv ("DICOM_STUDY"); + + //ENVVAR name: DICOM_SERIES + //ENVVAR when reading DICOM data, match the SeriesName entry against + //ENVVAR the string provided + const char* series_from_env = getenv ("DICOM_SERIES"); + + if (patient_from_env || patid_from_env || study_from_env || series_from_env) { + + // select using environment variables: + + vector> patient; + for (size_t i = 0; i < tree.size(); i++) { + if ( (!patient_from_env || match (patient_from_env, tree[i]->name, true)) && + (!patid_from_env || match (patid_from_env, tree[i]->ID, true)) ) + patient.push_back (tree[i]); + } + if (patient.empty()) + throw Exception ("no matching patients in DICOM dataset \"" + tree.description + "\""); + if (patient.size() > 1) + throw Exception ("too many matching patients in DICOM dataset \"" + tree.description + "\""); + + vector> study; + for (size_t i = 0; i < patient[0]->size(); i++) { + if (!study_from_env || match (study_from_env, (*patient[0])[i]->name, true)) + study.push_back ((*patient[0])[i]); + } + if (study.empty()) + throw 
Exception ("no matching studies in DICOM dataset \"" + tree.description + "\""); + if (study.size() > 1) + throw Exception ("too many matching studies in DICOM dataset \"" + tree.description + "\""); + + for (size_t i = 0; i < study[0]->size(); i++) { + if (!series_from_env || match (series_from_env, (*study[0])[i]->name, true)) + series.push_back ((*study[0])[i]); + } + if (series.empty()) + throw Exception ("no matching studies in DICOM dataset \"" + tree.description + "\""); + + return series; + } + + + + + // select interactively: + std::string buf; - const Patient* patient_p = NULL; + const Patient* patient_p = nullptr; if (tree.size() > 1) { - while (patient_p == NULL) { + while (patient_p == nullptr) { fprintf(stderr, "Select patient (q to abort):\n"); for (size_t i = 0; i < tree.size(); i++) { fprintf (stderr, " %2" PRI_SIZET " - %s %s %s\n", @@ -44,15 +105,15 @@ namespace MR { } std::cerr << "? "; std::cin >> buf; - if (!isdigit (buf[0])) { - series.clear(); - return series; + if (buf[0] == 'q' || buf[0] == 'Q') + throw CancelException(); + if (isdigit (buf[0])) { + int n = to(buf) - 1; + if (n <= (int) tree.size()) + patient_p = tree[n].get(); } - int n = to(buf) - 1; - if (n > (int) tree.size()) + if (!patient_p) fprintf (stderr, "invalid selection - try again\n"); - else - patient_p = tree[n].get(); } } else @@ -70,9 +131,9 @@ namespace MR { } - const Study* study_p = NULL; + const Study* study_p = nullptr; if (patient.size() > 1) { - while (study_p == NULL) { + while (study_p == nullptr) { fprintf (stderr, "Select study (q to abort):\n"); for (size_t i = 0; i < patient.size(); i++) { fprintf (stderr, " %4" PRI_SIZET " - %s %s %s %s\n", @@ -84,15 +145,15 @@ namespace MR { } std::cerr << "? "; std::cin >> buf; - if (!isdigit (buf[0])) { - series.clear(); - return series; + if (buf[0] == 'q' || buf[0] == 'Q') + throw CancelException(); + if (isdigit (buf[0])) { + int n = to(buf) - 1; + if (n <= (int) patient.size()) + study_p = patient[n].get(); } - int n = to(buf) - 1; - if (n > (int) patient.size()) + if (!study_p) fprintf (stderr, "invalid selection - try again\n"); - else - study_p = patient[n].get(); } } else @@ -127,28 +188,26 @@ namespace MR { } std::cerr << "? "; std::cin >> buf; - if (!isdigit (buf[0])) { - series.clear(); - return (series); - } - vector seq; - try { - seq = parse_ints (buf); - } - catch (Exception) { - fprintf (stderr, "Invalid number sequence - please try again\n"); - seq.clear(); - } - if (seq.size()) { - for (size_t i = 0; i < seq.size(); i++) { - if (seq[i] < 0 || seq[i] >= (int) study.size()) { - fprintf (stderr, "invalid selection - try again\n"); - series.clear(); - break; + if (buf[0] == 'q' || buf[0] == 'Q') + throw CancelException(); + if (isdigit (buf[0])) { + vector seq; + try { + seq = parse_ints (buf); + for (size_t i = 0; i < seq.size(); i++) { + if (seq[i] < 0 || seq[i] >= (int) study.size()) { + series.clear(); + break; + } + series.push_back (study[seq[i]]); } - series.push_back (study[seq[i]]); + } + catch (Exception) { + seq.clear(); } } + if (series.size() == 0) + fprintf (stderr, "Invalid selection - please try again\n"); } } else series.push_back (study[0]); diff --git a/core/file/dicom/series.cpp b/core/file/dicom/series.cpp index 89d8bc9f67..4b1577a9e1 100644 --- a/core/file/dicom/series.cpp +++ b/core/file/dicom/series.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. 
* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "file/dicom/series.h" #include "file/dicom/study.h" #include "file/dicom/patient.h" diff --git a/core/file/dicom/series.h b/core/file/dicom/series.h index 6dd4fdd7ce..640a85bd81 100644 --- a/core/file/dicom/series.h +++ b/core/file/dicom/series.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_dicom_series_h__ #define __file_dicom_series_h__ diff --git a/core/file/dicom/study.cpp b/core/file/dicom/study.cpp index d0c6a6d69e..cfb3765ea8 100644 --- a/core/file/dicom/study.cpp +++ b/core/file/dicom/study.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "file/dicom/patient.h" #include "file/dicom/study.h" #include "file/dicom/series.h" diff --git a/core/file/dicom/study.h b/core/file/dicom/study.h index 49a92e0442..2f4d744e57 100644 --- a/core/file/dicom/study.h +++ b/core/file/dicom/study.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_dicom_study_h__ #define __file_dicom_study_h__ diff --git a/core/file/dicom/tree.cpp b/core/file/dicom/tree.cpp index b2dd54b1f5..af0bd059e2 100644 --- a/core/file/dicom/tree.cpp +++ b/core/file/dicom/tree.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "file/path.h" #include "file/dicom/element.h" #include "file/dicom/quick_scan.h" @@ -116,6 +117,7 @@ namespace MR { void Tree::read (const std::string& filename) { + description = filename; ProgressBar progress ("scanning DICOM folder \"" + shorten (filename) + "\"", 0); if (Path::is_dir (filename)) read_dir (filename, progress); diff --git a/core/file/dicom/tree.h b/core/file/dicom/tree.h index c96a8d5f16..bbdbd724f4 100644 --- a/core/file/dicom/tree.h +++ b/core/file/dicom/tree.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
* - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_dicom_tree_h__ #define __file_dicom_tree_h__ diff --git a/core/file/entry.h b/core/file/entry.h index 5578098594..e13abb392e 100644 --- a/core/file/entry.h +++ b/core/file/entry.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_entry_h__ #define __file_entry_h__ diff --git a/core/file/gz.h b/core/file/gz.h index 04335a17cb..291c4b9710 100644 --- a/core/file/gz.h +++ b/core/file/gz.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_gz_h__ #define __file_gz_h__ diff --git a/core/file/json_utils.cpp b/core/file/json_utils.cpp index 418aad9705..ab5cc55b37 100644 --- a/core/file/json_utils.cpp +++ b/core/file/json_utils.cpp @@ -1,19 +1,21 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. 
* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include +#include #include "file/json_utils.h" #include "file/nifti_utils.h" @@ -46,88 +48,257 @@ namespace MR } catch (std::logic_error& e) { throw Exception ("Error parsing JSON file \"" + path + "\": " + e.what()); } - for (auto i = json.cbegin(); i != json.cend(); ++i) { + read (json, H, true); + } + + + + void save (const Header& H, const std::string& json_path, const std::string& image_path) + { + nlohmann::json json; + write (H, json, image_path); + File::OFStream out (json_path); + out << json.dump(4); + } + + + + + KeyValues read (const nlohmann::json& json, const KeyValues& preexisting) + { + KeyValues result; + for (auto i = json.cbegin(); i != json.cend(); ++i) { if (i->is_boolean()) { - H.keyval().insert (std::make_pair (i.key(), i.value() ? "1" : "0")); + result.insert (std::make_pair (i.key(), i.value() ? "true" : "false")); } else if (i->is_number_integer()) { - H.keyval().insert (std::make_pair (i.key(), str(i.value()))); + result.insert (std::make_pair (i.key(), str(i.value()))); } else if (i->is_number_float()) { - H.keyval().insert (std::make_pair (i.key(), str(i.value()))); + result.insert (std::make_pair (i.key(), str(i.value()))); + } else if (i->is_string()) { + const std::string s = unquote(i.value()); + result.insert (std::make_pair (i.key(), s)); } else if (i->is_array()) { - vector s; - for (auto j = i->cbegin(); j != i->cend(); ++j) { - if (j->is_array()) { + size_t num_subarrays = 0; + for (const auto& j : *i) + if (j.is_array()) + ++num_subarrays; + if (num_subarrays == 0) { + bool all_string = true; + bool all_numeric = true; + for (const auto& k : *i) { + if (!k.is_string()) + all_string = false; + if (!(k.is_number())) + all_numeric = false; + } + if (all_string) { + vector lines; + for (const auto& k : *i) + lines.push_back (unquote(k)); + result.insert (std::make_pair (i.key(), join (lines, "\n"))); + } else if (all_numeric) { vector line; - for (auto k : *j) + for (const auto& k : *i) line.push_back (str(k)); - s.push_back (join(line, ",")); + result.insert (std::make_pair (i.key(), join (line, ","))); } else { - s.push_back (str(*j)); + throw Exception ("JSON entry \"" + i.key() + "\" is array but contains mixed data types"); } } - H.keyval().insert (std::make_pair (i.key(), join(s, "\n"))); - } else if (i->is_string()) { - const std::string s = i.value(); - H.keyval().insert (std::make_pair (i.key(), s)); + else if (num_subarrays == i->size()) { + vector s; + for (const auto& j : *i) { + vector line; + for (const auto& k : j) + line.push_back (unquote(str(k))); + s.push_back (join(line, ",")); + } + result.insert (std::make_pair (i.key(), 
join(s, "\n"))); + } + else + throw Exception ("JSON entry \"" + i.key() + "\" contains mixture of elements and arrays"); } - } + for (const auto& kv : preexisting) { + if (kv.first == "comments" && result.find ("comments") != result.end()) { + add_line (result["comments"], kv.second); + } else { + // Will not overwrite existing entries + result.insert (kv); + } + } + return result; + } - if (!Header::do_not_realign_transform) { - // The corresponding header may have been rotated on image load prior to the JSON - // being loaded. If this is the case, any fields that indicate an image axis - // number / direction need to be correspondingly modified. - size_t perm[3]; - bool flip[3]; - Axes::get_permutation_to_make_axial (H.transform(), perm, flip); - if (perm[0] != 0 || perm[1] != 1 || perm[2] != 2 || flip[0] || flip[1] || flip[2]) { + void read (const nlohmann::json& json, Header& header, const bool realign) + { + header.keyval() = read (json, header.keyval()); + const bool do_realign = realign && Header::do_realign_transform; + + // The corresponding header may have been rotated on image load prior to the JSON + // being loaded. If this is the case, any fields that indicate an image axis + // number / direction need to be correspondingly modified. + std::array perm; + std::array flip; + header.realignment (perm, flip); + if (perm[0] == 0 && perm[1] == 1 && perm[2] == 2 && !flip[0] && !flip[1] && !flip[2]) + return; - auto pe_scheme = PhaseEncoding::get_scheme (H); - if (pe_scheme.rows()) { - for (ssize_t row = 0; row != pe_scheme.rows(); ++row) { - auto new_line = pe_scheme.row (row); - for (ssize_t axis = 0; axis != 3; ++axis) - new_line[perm[axis]] = flip[perm[axis]] ? pe_scheme(row,axis) : -pe_scheme(row,axis); - pe_scheme.row (row) = new_line; + auto pe_scheme = PhaseEncoding::get_scheme (header); + if (pe_scheme.rows()) { + if (do_realign) { + for (ssize_t row = 0; row != pe_scheme.rows(); ++row) { + Eigen::VectorXd new_line = pe_scheme.row (row); + for (ssize_t axis = 0; axis != 3; ++axis) { + new_line[axis] = pe_scheme(row, perm[axis]); + if (new_line[axis] && flip[perm[axis]]) + new_line[axis] = -new_line[axis]; } - PhaseEncoding::set_scheme (H, pe_scheme); - INFO ("Phase encoding information read from JSON file modified according to expected input image header transform realignment"); + pe_scheme.row (row) = new_line; } + PhaseEncoding::set_scheme (header, pe_scheme); + INFO ("Phase encoding information read from JSON file modified to conform to prior MRtrix3 internal transform realignment of input image"); + } else { + INFO ("Phase encoding information read from JSON file not modified"); + } + } + + auto slice_encoding_it = header.keyval().find ("SliceEncodingDirection"); + if (slice_encoding_it != header.keyval().end()) { + if (do_realign) { + const Eigen::Vector3 orig_dir (Axes::id2dir (slice_encoding_it->second)); + Eigen::Vector3 new_dir; + for (size_t axis = 0; axis != 3; ++axis) + new_dir[axis] = flip[perm[axis]] ? 
-orig_dir[perm[axis]] : orig_dir[perm[axis]]; + slice_encoding_it->second = Axes::dir2id (new_dir); + INFO ("Slice encoding direction read from JSON file modified to conform to prior MRtrix3 internal transform realignment of input image"); + } else { + INFO ("Slice encoding information read from JSON file not modified"); + } + } - auto slice_encoding_it = H.keyval().find ("SliceEncodingDirection"); - if (slice_encoding_it != H.keyval().end()) { - const Eigen::Vector3 orig_dir (Axes::id2dir (slice_encoding_it->second)); - Eigen::Vector3 new_dir; - for (size_t axis = 0; axis != 3; ++axis) - new_dir[perm[axis]] = flip[perm[axis]] > 0 ? orig_dir[axis] : -orig_dir[axis]; - slice_encoding_it->second = Axes::dir2id (new_dir); - INFO ("Slice encoding direction read from JSON file modified according to expected input image header transform realignment"); + } + + + + namespace { + template + bool attempt_scalar (const std::pair& kv, nlohmann::json& json) + { + try { + const T temp = to (kv.second); + json[kv.first] = temp; + return true; + } catch (...) { } + return false; + } + bool attempt_matrix (const std::pair& kv, nlohmann::json& json) + { + try { + auto M_float = parse_matrix (kv.second); + if (M_float.cols() == 1) + M_float.transposeInPlace(); + nlohmann::json temp; + bool noninteger = false; + for (ssize_t row = 0; row != M_float.rows(); ++row) { + vector data (M_float.cols()); + for (ssize_t i = 0; i != M_float.cols(); ++i) { + data[i] = M_float (row, i); + if (std::floor (data[i]) != data[i]) + noninteger = true; + } + if (row) + temp[kv.first].push_back (data); + else if (M_float.rows() == 1) + temp[kv.first] = data; + else + temp[kv.first] = { data }; + } + if (noninteger) { + json[kv.first] = temp[kv.first]; + } else { + // No non-integer values found; + // Write the data natively as integers + auto M_int = parse_matrix (kv.second); + if (M_int.cols() == 1) + M_int.transposeInPlace(); + temp[kv.first] = nlohmann::json({}); + for (ssize_t row = 0; row != M_int.rows(); ++row) { + vector data (M_int.cols()); + for (ssize_t i = 0; i != M_int.cols(); ++i) + data[i] = M_int (row, i); + if (row) + temp[kv.first].push_back (data); + else if (M_int.rows() == 1) + temp[kv.first] = data; + else + temp[kv.first] = { data }; + } + json[kv.first] = temp[kv.first]; } + return true; + } catch (...) 
{ return false; } + } + } + + void write (const KeyValues& keyval, nlohmann::json& json) + { + auto write_string = [] (const std::pair& kv, + nlohmann::json& json) + { + const auto lines = split_lines (kv.second); + if (lines.size() > 1) + json[kv.first] = lines; + else + json[kv.first] = kv.second; + }; + + for (const auto& kv : keyval) { + if (attempt_scalar (kv, json)) continue; + if (attempt_scalar (kv, json)) continue; + if (attempt_scalar (kv, json)) continue; + if (attempt_matrix (kv, json)) continue; + if (json.find (kv.first) == json.end()) { + write_string (kv, json); + } else { + nlohmann::json temp; + write_string (kv, temp); + if (json[kv.first] != temp[kv.first]) + json[kv.first] = "variable"; } } } - void save (const Header& H, const std::string& path) + void write (const Header& header, nlohmann::json& json, const std::string& image_path) { - nlohmann::json json; - auto pe_scheme = PhaseEncoding::get_scheme (H); + Header H_adj (header); + H_adj.name() = image_path; + if (!Path::has_suffix (image_path, { ".nii", ".nii.gz", ".img" })) { + write (H_adj.keyval(), json); + return; + } + vector order; - File::NIfTI::adjust_transform (H, order); - Header H_adj (H); - const bool axes_adjusted = (order[0] != 0 || order[1] != 1 || order[2] != 2 || H.stride(0) < 0 || H.stride(1) < 0 || H.stride(2) < 0); - if (pe_scheme.rows() && axes_adjusted) { + File::NIfTI::adjust_transform (header, order); + if (!(order[0] != 0 || order[1] != 1 || order[2] != 2 || header.stride(0) < 0 || header.stride(1) < 0 || header.stride(2) < 0)) { + write (H_adj.keyval(), json); + return; + } + + auto pe_scheme = PhaseEncoding::get_scheme (header); + if (pe_scheme.rows()) { // Assume that image being written to disk is going to have its transform adjusted, // so modify the phase encoding scheme appropriately before writing to JSON for (ssize_t row = 0; row != pe_scheme.rows(); ++row) { - Eigen::Matrix new_line = pe_scheme.row (row); + Eigen::VectorXd new_line = pe_scheme.row (row); for (ssize_t axis = 0; axis != 3; ++axis) - new_line[axis] = pe_scheme(row, order[axis]) && H.stride (order[axis]) < 0 ? + new_line[axis] = pe_scheme(row, order[axis]) && header.stride (order[axis]) < 0 ? -pe_scheme(row, order[axis]) : pe_scheme(row, order[axis]); pe_scheme.row (row) = new_line; @@ -136,18 +307,16 @@ namespace MR INFO ("Phase encoding information written to JSON file modified according to expected output NIfTI header transform realignment"); } auto slice_encoding_it = H_adj.keyval().find ("SliceEncodingDirection"); - if (slice_encoding_it != H_adj.keyval().end() && axes_adjusted) { + if (slice_encoding_it != H_adj.keyval().end()) { const Eigen::Vector3 orig_dir (Axes::id2dir (slice_encoding_it->second)); Eigen::Vector3 new_dir; for (size_t axis = 0; axis != 3; ++axis) - new_dir[order[axis]] = H.stride (order[axis]) > 0 ? orig_dir[order[axis]] : -orig_dir[order[axis]]; + new_dir[axis] = header.stride (order[axis]) > 0 ? 
orig_dir[order[axis]] : -orig_dir[order[axis]]; slice_encoding_it->second = Axes::dir2id (new_dir); INFO ("Slice encoding direction written to JSON file modified according to expected output NIfTI header transform realignment"); } - for (const auto& kv : H_adj.keyval()) - json[kv.first] = kv.second; - File::OFStream out (path); - out << json.dump(4); + + write (H_adj.keyval(), json); } @@ -155,4 +324,3 @@ namespace MR } } } - diff --git a/core/file/json_utils.h b/core/file/json_utils.h index 8674af5825..6d2048514f 100644 --- a/core/file/json_utils.h +++ b/core/file/json_utils.h @@ -1,22 +1,24 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_json_utils_h__ #define __file_json_utils_h__ #include "file/json.h" +#include "file/key_value.h" namespace MR { @@ -27,8 +29,19 @@ namespace MR namespace JSON { - void load (Header&, const std::string&); - void save (const Header&, const std::string&); + void load (Header& H, const std::string& path); + void save (const Header& H, const std::string& json_path, const std::string& image_path); + + KeyValues read (const nlohmann::json& json, + const KeyValues& preexisting = KeyValues()); + void read (const nlohmann::json& json, + Header& header, + const bool realign); + + void write (const KeyValues& keyval, nlohmann::json& json); + void write (const Header& header, + nlohmann::json& json, + const std::string& image_path); } } diff --git a/core/file/key_value.cpp b/core/file/key_value.cpp index eabd9fbac9..05b1889d8c 100644 --- a/core/file/key_value.cpp +++ b/core/file/key_value.cpp @@ -1,86 +1,116 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. 
* - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "file/key_value.h" - +#include "file/ofstream.h" +#include "app.h" namespace MR { namespace File { - - void KeyValue::open (const std::string& file, const char* first_line) + namespace KeyValue { - filename.clear(); - DEBUG ("reading key/value file \"" + file + "\"..."); - - in.open (file.c_str(), std::ios::in | std::ios::binary); - if (!in) - throw Exception ("failed to open key/value file \"" + file + "\": " + strerror (errno)); - if (first_line) { - std::string sbuf; - getline (in, sbuf); - if (sbuf.compare (0, strlen (first_line), first_line)) { - in.close(); - throw Exception ("invalid first line for key/value file \"" + file + "\" (expected \"" + first_line + "\")"); + + + + void Reader::open (const std::string& file, const char* first_line) + { + filename.clear(); + DEBUG ("reading key/value file \"" + file + "\"..."); + + in.open (file.c_str(), std::ios::in | std::ios::binary); + if (!in) + throw Exception ("failed to open key/value file \"" + file + "\": " + strerror (errno)); + if (first_line) { + std::string sbuf; + getline (in, sbuf); + if (sbuf.compare (0, strlen (first_line), first_line)) { + in.close(); + throw Exception ("invalid first line for key/value file \"" + file + "\" (expected \"" + first_line + "\")"); + } } + filename = file; } - filename = file; - } - bool KeyValue::next () - { - while (in.good()) { - std::string sbuf; - getline (in, sbuf); - if (in.bad()) - throw Exception ("error reading key/value file \"" + filename + "\": " + strerror (errno)); - - sbuf = strip (sbuf.substr (0, sbuf.find_first_of ('#'))); - if (sbuf == "END") { - in.setstate (std::ios::eofbit); - return false; - } + bool Reader::next () + { + while (in.good()) { + std::string sbuf; + getline (in, sbuf); + if (in.bad()) + throw Exception ("error reading key/value file \"" + filename + "\": " + strerror (errno)); - if (sbuf.size()) { - size_t colon = sbuf.find_first_of (':'); - if (colon == std::string::npos) { - INFO ("malformed key/value entry (\"" + sbuf + "\") in file \"" + filename + "\" - ignored"); + sbuf = strip (sbuf.substr (0, sbuf.find_first_of ('#'))); + if (sbuf == "END") { + in.setstate (std::ios::eofbit); + return false; } - else { - K = strip (sbuf.substr (0, colon)); - V = strip (sbuf.substr (colon+1)); - if (K.empty()) { + + if (sbuf.size()) { + size_t colon = sbuf.find_first_of (':'); + if (colon == std::string::npos) { INFO ("malformed key/value entry (\"" + sbuf + "\") in file \"" + filename + "\" - ignored"); } - else - return true; + else { + K = strip (sbuf.substr (0, colon)); + V = strip (sbuf.substr (colon+1)); + if (K.empty()) { + INFO ("malformed key/value entry (\"" + sbuf + "\") in file \"" + filename + "\" - ignored"); + } + else + return true; + } + } + } + return false; + } + + + + void write (File::OFStream& out, + const KeyValues& keyvals, + const std::string& prefix, + const bool add_to_command_history) + { + bool command_history_appended = false; + for (const auto& keyval : keyvals) { + const auto lines = split_lines(keyval.second); + for (const auto& line : lines) + out << prefix << keyval.first << ": " << line << "\n"; + if (add_to_command_history && keyval.first == "command_history") { + out << prefix << "command_history: " << App::command_history_string << "\n"; + command_history_appended = true; + } } + if (add_to_command_history && !command_history_appended) + out << prefix << "command_history: " << App::command_history_string << 
"\n"; } - return false; - } + + } } } diff --git a/core/file/key_value.h b/core/file/key_value.h index 6a7f17ac6a..d9da4f4ae4 100644 --- a/core/file/key_value.h +++ b/core/file/key_value.h @@ -1,57 +1,77 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_key_value_h__ #define __file_key_value_h__ #include #include "mrtrix.h" +#include "types.h" namespace MR { namespace File { - class KeyValue { NOMEMALIGN - public: - KeyValue () { } - KeyValue (const std::string& file, const char* first_line = NULL) { - open (file, first_line); - } - - void open (const std::string& file, const char* first_line = NULL); - bool next (); - void close () { - in.close(); - } - - const std::string& key () const throw () { - return (K); - } - const std::string& value () const throw () { - return (V); - } - const std::string& name () const throw () { - return (filename); - } - - protected: - std::string K, V, filename; - std::ifstream in; - }; + class OFStream; + + namespace KeyValue + { + + + + class Reader { NOMEMALIGN + public: + Reader () { } + Reader (const std::string& file, const char* first_line = nullptr) { + open (file, first_line); + } + + void open (const std::string& file, const char* first_line = nullptr); + bool next (); + void close () { + in.close(); + } + + const std::string& key () const throw () { + return (K); + } + const std::string& value () const throw () { + return (V); + } + const std::string& name () const throw () { + return (filename); + } + + protected: + std::string K, V, filename; + std::ifstream in; + }; + + + + + void write (File::OFStream& out, + const KeyValues& keyvals, + const std::string& prefix, + const bool add_to_command_history = true); + + + } } } diff --git a/core/file/mgh.cpp b/core/file/mgh.cpp index f7d43027b9..a9db7a76dc 100644 --- a/core/file/mgh.cpp +++ b/core/file/mgh.cpp @@ -1,24 +1,24 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "header.h" #include "file/ofstream.h" #include "file/mgh.h" -#include "file/nifti1_utils.h" namespace MR { diff --git a/core/file/mgh.h b/core/file/mgh.h index b34b4657f5..b762beed36 100644 --- a/core/file/mgh.h +++ b/core/file/mgh.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_mgh_h__ #define __file_mgh_h__ diff --git a/core/file/mmap.cpp b/core/file/mmap.cpp index 7d3b693dcd..c9580b153c 100644 --- a/core/file/mmap.cpp +++ b/core/file/mmap.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include #include diff --git a/core/file/mmap.h b/core/file/mmap.h index f3f35d579b..ddaa9974c0 100644 --- a/core/file/mmap.h +++ b/core/file/mmap.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_mmap_h__ #define __file_mmap_h__ diff --git a/core/file/name_parser.cpp b/core/file/name_parser.cpp index bdea3e6d41..c349551c38 100644 --- a/core/file/name_parser.cpp +++ b/core/file/name_parser.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include #include "file/name_parser.h" diff --git a/core/file/name_parser.h b/core/file/name_parser.h index d35a827c83..45bdc416ef 100644 --- a/core/file/name_parser.h +++ b/core/file/name_parser.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #ifndef __file_name_parser_h__ #define __file_name_parser_h__ diff --git a/core/file/nifti1_utils.cpp b/core/file/nifti1_utils.cpp deleted file mode 100644 index 4dd32c06b2..0000000000 --- a/core/file/nifti1_utils.cpp +++ /dev/null @@ -1,468 +0,0 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ - * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * For more details, see http://www.mrtrix.org/ - */ - - -#include "header.h" -#include "raw.h" -#include "file/config.h" -#include "file/json_utils.h" -#include "file/nifti_utils.h" -#include "file/nifti1_utils.h" - -namespace MR -{ - namespace File - { - namespace NIfTI1 - { - - - - size_t read (Header& H, const nifti_1_header& NH) - { - bool is_BE = false; - if (Raw::fetch_ (&NH.sizeof_hdr, is_BE) != header_size) { - is_BE = true; - if (Raw::fetch_ (&NH.sizeof_hdr, is_BE) != header_size) - throw Exception ("image \"" + H.name() + "\" is not in NIfTI-1.1 format (sizeof_hdr != " + str(header_size) + ")"); - } - - bool is_nifti = true; - if (memcmp (NH.magic, "n+1\0", 4) && memcmp (NH.magic, "ni1\0", 4)) { - is_nifti = false; - DEBUG ("assuming image \"" + H.name() + "\" is in AnalyseAVW format."); - } - - char db_name[19]; - strncpy (db_name, NH.db_name, 18); - if (db_name[0]) { - db_name[18] = '\0'; - add_line (H.keyval()["comments"], db_name); - } - - // data set dimensions: - int ndim = Raw::fetch_ (&NH.dim, is_BE); - if (ndim < 1) - throw Exception ("too few dimensions specified in NIfTI-1.1 image \"" + H.name() + "\""); - if (ndim > 7) - throw Exception ("too many dimensions specified in NIfTI-1.1 image \"" + H.name() + "\""); - H.ndim() = ndim; - - - for (int i = 0; i < ndim; i++) { - H.size(i) = Raw::fetch_ (&NH.dim[i+1], is_BE); - if (H.size (i) < 0) { - INFO ("dimension along axis " + str (i) + " specified as negative in NIfTI-1.1 image \"" + H.name() + "\" - taking absolute value"); - H.size(i) = abs (H.size (i)); - } - if (!H.size (i)) - H.size(i) = 1; - H.stride(i) = i+1; - } - - // data type: - DataType dtype; - switch (Raw::fetch_ (&NH.datatype, is_BE)) { - case DT_BINARY: - dtype = DataType::Bit; - break; - case DT_INT8: - dtype = DataType::Int8; - break; - case DT_UINT8: - dtype = DataType::UInt8; - break; - case DT_INT16: - dtype = DataType::Int16; - break; - case DT_UINT16: - dtype = DataType::UInt16; - break; - case DT_INT32: - dtype = DataType::Int32; - break; - case DT_UINT32: - dtype = DataType::UInt32; - break; - case DT_INT64: - dtype = DataType::Int64; - break; - case DT_UINT64: - dtype = DataType::UInt64; - break; - case DT_FLOAT32: - dtype = DataType::Float32; - break; - case DT_FLOAT64: - dtype = DataType::Float64; - break; - case DT_COMPLEX64: - dtype = DataType::CFloat32; - break; - case DT_COMPLEX128: - dtype = DataType::CFloat64; - break; - default: - throw Exception ("unknown data type for NIfTI-1.1 image \"" + H.name() + "\""); - } - - if (! 
(dtype.is (DataType::Bit) || dtype.is (DataType::UInt8) || dtype.is (DataType::Int8))) { - if (is_BE) - dtype.set_flag (DataType::BigEndian); - else - dtype.set_flag (DataType::LittleEndian); - } - - if (Raw::fetch_ (&NH.bitpix, is_BE) != (int16_t) dtype.bits()) - WARN ("bitpix field does not match data type in NIfTI-1.1 image \"" + H.name() + "\" - ignored"); - - H.datatype() = dtype; - - // voxel sizes: - for (int i = 0; i < ndim; i++) { - H.spacing(i) = Raw::fetch_ (&NH.pixdim[i+1], is_BE); - if (H.spacing (i) < 0.0) { - INFO ("voxel size along axis " + str (i) + " specified as negative in NIfTI-1.1 image \"" + H.name() + "\" - taking absolute value"); - H.spacing(i) = abs (H.spacing (i)); - } - } - - - // offset and scale: - H.intensity_scale() = Raw::fetch_ (&NH.scl_slope, is_BE); - if (std::isfinite (H.intensity_scale()) && H.intensity_scale() != 0.0) { - H.intensity_offset() = Raw::fetch_ (&NH.scl_inter, is_BE); - if (!std::isfinite (H.intensity_offset())) - H.intensity_offset() = 0.0; - } - else - H.reset_intensity_scaling(); - - size_t data_offset = (size_t) Raw::fetch_ (&NH.vox_offset, is_BE); - - char descrip[81]; - strncpy (descrip, NH.descrip, 80); - if (descrip[0]) { - descrip[80] = '\0'; - if (strncmp (descrip, "MRtrix version: ", 16) == 0) - H.keyval()["mrtrix_version"] = descrip+16; - else - add_line (H.keyval()["comments"], descrip); - } - - if (is_nifti) { - bool sform_code = Raw::fetch_ (&NH.sform_code, is_BE); - if (sform_code) { - auto& M (H.transform().matrix()); - - M(0,0) = Raw::fetch_ (&NH.srow_x[0], is_BE); - M(0,1) = Raw::fetch_ (&NH.srow_x[1], is_BE); - M(0,2) = Raw::fetch_ (&NH.srow_x[2], is_BE); - M(0,3) = Raw::fetch_ (&NH.srow_x[3], is_BE); - - M(1,0) = Raw::fetch_ (&NH.srow_y[0], is_BE); - M(1,1) = Raw::fetch_ (&NH.srow_y[1], is_BE); - M(1,2) = Raw::fetch_ (&NH.srow_y[2], is_BE); - M(1,3) = Raw::fetch_ (&NH.srow_y[3], is_BE); - - M(2,0) = Raw::fetch_ (&NH.srow_z[0], is_BE); - M(2,1) = Raw::fetch_ (&NH.srow_z[1], is_BE); - M(2,2) = Raw::fetch_ (&NH.srow_z[2], is_BE); - M(2,3) = Raw::fetch_ (&NH.srow_z[3], is_BE); - - // check voxel sizes: - for (size_t axis = 0; axis != 3; ++axis) { - if (size_t(ndim) > axis) - if (abs(H.spacing(axis) - M.col(axis).head<3>().norm()) > 1e-4) { - WARN ("voxel spacings inconsistent between NIFTI s-form and header field pixdim"); - break; - } - } - - // normalize each transform axis: - for (size_t axis = 0; axis != 3; ++axis) { - if (size_t(ndim) > axis) - M.col(axis).normalize(); - } - - } - - if (Raw::fetch_ (&NH.qform_code, is_BE)) { - transform_type M_qform; - - Eigen::Quaterniond Q (0.0, Raw::fetch_ (&NH.quatern_b, is_BE), Raw::fetch_ (&NH.quatern_c, is_BE), Raw::fetch_ (&NH.quatern_d, is_BE)); - const double w = 1.0 - Q.squaredNorm(); - if (w < 1.0e-7) - Q.normalize(); - else - Q.w() = std::sqrt (w); - M_qform.matrix().topLeftCorner<3,3>() = Q.matrix(); - - M_qform.translation()[0] = Raw::fetch_ (&NH.qoffset_x, is_BE); - M_qform.translation()[1] = Raw::fetch_ (&NH.qoffset_y, is_BE); - M_qform.translation()[2] = Raw::fetch_ (&NH.qoffset_z, is_BE); - - // qfac: - float qfac = Raw::fetch_ (&NH.pixdim[0], is_BE) >= 0.0 ? 
1.0 : -1.0; - if (qfac < 0.0) - M_qform.matrix().col(2) *= qfac; - - if (sform_code) { - Header header2 (H); - header2.transform() = M_qform; - if (!voxel_grids_match_in_scanner_space (H, header2, 0.1)) { - //CONF option: NIfTIUseSform - //CONF default: 0 (false) - //CONF A boolean value to control whether, in cases where both - //CONF the sform and qform transformations are defined in an - //CONF input NIfTI image, but those transformations differ, the - //CONF sform transformation should be used in preference to the - //CONF qform matrix (the default behaviour). - const bool use_sform = File::Config::get_bool ("NIfTIUseSform", false); - WARN ("qform and sform are inconsistent in NIfTI image \"" + H.name() + "\" - using " + (use_sform ? "sform" : "qform")); - if (!use_sform) - H.transform() = M_qform; - } - } - else - H.transform() = M_qform; - } - - //CONF option: NIfTIAutoLoadJSON - //CONF default: 0 (false) - //CONF A boolean value to indicate whether, when opening NIfTI images, - //CONF any corresponding JSON file should be automatically loaded. - if (File::Config::get_bool ("NIfTIAutoLoadJSON", false)) { - std::string json_path = H.name(); - if (Path::has_suffix (json_path, ".nii.gz")) - json_path = json_path.substr (0, json_path.size()-7); - else if (Path::has_suffix (json_path, ".nii")) - json_path = json_path.substr (0, json_path.size()-4); - else - assert (0); - json_path += ".json"; - if (Path::exists (json_path)) - File::JSON::load (H, json_path); - } - - } - else { - H.transform()(0,0) = std::numeric_limits::quiet_NaN(); - //CONF option: AnalyseLeftToRight - //CONF default: 0 (false) - //CONF A boolean value to indicate whether images in Analyse format - //CONF should be assumed to be in LAS orientation (default) or RAS - //CONF (when this is option is turned on). - if (!File::Config::get_bool ("AnalyseLeftToRight", false)) - H.stride(0) = -H.stride (0); - if (!File::NIfTI::right_left_warning_issued) { - INFO ("assuming Analyse images are encoded " + std::string (H.stride (0) > 0 ? "left to right" : "right to left")); - File::NIfTI::right_left_warning_issued = true; - } - } - - return data_offset; - } - - - - - - - - void write (nifti_1_header& NH, const Header& H, const bool single_file) - { - if (H.ndim() > 7) - throw Exception ("NIfTI-1.1 format cannot support more than 7 dimensions for image \"" + H.name() + "\""); - - bool is_BE = H.datatype().is_big_endian(); - - vector axes; - auto M = File::NIfTI::adjust_transform (H, axes); - - memset (&NH, 0, sizeof (NH)); - - // magic number: - Raw::store (header_size, &NH.sizeof_hdr, is_BE); - - const auto hit = H.keyval().find("comments"); - auto comments = split_lines (hit == H.keyval().end() ? std::string() : hit->second); - strncpy ( (char*) &NH.db_name, comments.size() ? 
comments[0].c_str() : "untitled\0\0\0\0\0\0\0\0\0\0\0", 18); - Raw::store (16384, &NH.extents, is_BE); - NH.regular = 'r'; - NH.dim_info = 0; - - // data set dimensions: - Raw::store (H.ndim(), &NH.dim[0], is_BE); - { - size_t i = 0; - for (; i < 3; i++) - Raw::store (H.size (axes[i]), &NH.dim[i+1], is_BE); - for (; i < H.ndim(); i++) - Raw::store (H.size (i), &NH.dim[i+1], is_BE); - - // pad out the other dimensions with 1, fix for fslview - ++i; - for (; i < 8; i++) - Raw::store (1, &NH.dim[i], is_BE); - } - - - // data type: - int16_t dt = 0; - switch (H.datatype()()) { - case DataType::Bit: - dt = DT_BINARY; - break; - case DataType::Int8: - dt = DT_INT8; - break; - case DataType::UInt8: - dt = DT_UINT8; - break; - case DataType::Int16LE: - case DataType::Int16BE: - dt = DT_INT16; - break; - case DataType::UInt16LE: - case DataType::UInt16BE: - dt = DT_UINT16; - break; - case DataType::Int32LE: - case DataType::Int32BE: - dt = DT_INT32; - break; - case DataType::UInt32LE: - case DataType::UInt32BE: - dt = DT_UINT32; - break; - case DataType::Int64LE: - case DataType::Int64BE: - dt = DT_INT64; - break; - case DataType::UInt64LE: - case DataType::UInt64BE: - dt = DT_UINT64; - break; - case DataType::Float32LE: - case DataType::Float32BE: - dt = DT_FLOAT32; - break; - case DataType::Float64LE: - case DataType::Float64BE: - dt = DT_FLOAT64; - break; - case DataType::CFloat32LE: - case DataType::CFloat32BE: - dt = DT_COMPLEX64; - break; - case DataType::CFloat64LE: - case DataType::CFloat64BE: - dt = DT_COMPLEX128; - break; - default: - throw Exception ("unknown data type for NIfTI-1.1 image \"" + H.name() + "\""); - } - - Raw::store (dt, &NH.datatype, is_BE); - Raw::store (H.datatype().bits(), &NH.bitpix, is_BE); - - Raw::store (1.0, &NH.pixdim[0], is_BE); - // voxel sizes: - for (size_t i = 0; i < 3; ++i) - Raw::store (H.spacing (axes[i]), &NH.pixdim[i+1], is_BE); - for (size_t i = 3; i < H.ndim(); ++i) - Raw::store (H.spacing (i), &NH.pixdim[i+1], is_BE); - - Raw::store (float32(header_with_ext_size), &NH.vox_offset, is_BE); - - // offset and scale: - Raw::store (H.intensity_scale(), &NH.scl_slope, is_BE); - Raw::store (H.intensity_offset(), &NH.scl_inter, is_BE); - - NH.xyzt_units = SPACE_TIME_TO_XYZT (NIFTI_UNITS_MM, NIFTI_UNITS_SEC); - - memset ((char*) &NH.descrip, 0, 80); - std::string version_string = std::string("MRtrix version: ") + App::mrtrix_version; - if (App::project_version) - version_string += std::string(", project version: ") + App::project_version; - strncpy ( (char*) &NH.descrip, version_string.c_str(), 79); - - Raw::store (NIFTI_XFORM_SCANNER_ANAT, &NH.qform_code, is_BE); - Raw::store (NIFTI_XFORM_SCANNER_ANAT, &NH.sform_code, is_BE); - - // qform: - Eigen::Matrix3d R = M.matrix().topLeftCorner<3,3>(); - if (R.determinant() < 0.0) { - R.col(2) = -R.col(2); - Raw::store (-1.0, &NH.pixdim[0], is_BE); - } - Eigen::Quaterniond Q (R); - - if (Q.w() < 0.0) - Q.vec() = -Q.vec(); - - Raw::store (Q.x(), &NH.quatern_b, is_BE); - Raw::store (Q.y(), &NH.quatern_c, is_BE); - Raw::store (Q.z(), &NH.quatern_d, is_BE); - - - // sform: - - Raw::store (M(0,3), &NH.qoffset_x, is_BE); - Raw::store (M(1,3), &NH.qoffset_y, is_BE); - Raw::store (M(2,3), &NH.qoffset_z, is_BE); - - Raw::store (H.spacing (axes[0]) * M(0,0), &NH.srow_x[0], is_BE); - Raw::store (H.spacing (axes[1]) * M(0,1), &NH.srow_x[1], is_BE); - Raw::store (H.spacing (axes[2]) * M(0,2), &NH.srow_x[2], is_BE); - Raw::store (M (0,3), &NH.srow_x[3], is_BE); - - Raw::store (H.spacing (axes[0]) * M(1,0), &NH.srow_y[0], is_BE); - 
Raw::store (H.spacing (axes[1]) * M(1,1), &NH.srow_y[1], is_BE); - Raw::store (H.spacing (axes[2]) * M(1,2), &NH.srow_y[2], is_BE); - Raw::store (M (1,3), &NH.srow_y[3], is_BE); - - Raw::store (H.spacing (axes[0]) * M(2,0), &NH.srow_z[0], is_BE); - Raw::store (H.spacing (axes[1]) * M(2,1), &NH.srow_z[1], is_BE); - Raw::store (H.spacing (axes[2]) * M(2,2), &NH.srow_z[2], is_BE); - Raw::store (M (2,3), &NH.srow_z[3], is_BE); - - strncpy ( (char*) &NH.magic, single_file ? "n+1\0" : "ni1\0", 4); - - //CONF option: NIfTIAutoSaveJSON - //CONF default: 0 (false) - //CONF A boolean value to indicate whether, when writing NIfTI images, - //CONF a corresponding JSON file should be automatically created in order - //CONF to save any header entries that cannot be stored in the NIfTI - //CONF header. - if (single_file && File::Config::get_bool ("NIfTIAutoSaveJSON", false)) { - std::string json_path = H.name(); - if (Path::has_suffix (json_path, ".nii.gz")) - json_path = json_path.substr (0, json_path.size()-7); - else if (Path::has_suffix (json_path, ".nii")) - json_path = json_path.substr (0, json_path.size()-4); - else - assert (0); - json_path += ".json"; - File::JSON::save (H, json_path); - } - } - - - - } - } -} - diff --git a/core/file/nifti2.h b/core/file/nifti2.h index 661f52a7d4..cefe104a2f 100644 --- a/core/file/nifti2.h +++ b/core/file/nifti2.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __NIFTI2_HEADER #define __NIFTI2_HEADER diff --git a/core/file/nifti2_utils.cpp b/core/file/nifti2_utils.cpp deleted file mode 100644 index 01cc06a6d9..0000000000 --- a/core/file/nifti2_utils.cpp +++ /dev/null @@ -1,421 +0,0 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ - * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- * - * For more details, see http://www.mrtrix.org/ - */ - - -#include "header.h" -#include "raw.h" -#include "file/config.h" -#include "file/json_utils.h" -#include "file/nifti1.h" -#include "file/nifti_utils.h" -#include "file/nifti2_utils.h" - -namespace MR -{ - namespace File - { - namespace NIfTI2 - { - - - - size_t read (Header& H, const nifti_2_header& NH) - { - bool is_BE = false; - if (Raw::fetch_ (&NH.sizeof_hdr, is_BE) != header_size) { - is_BE = true; - if (Raw::fetch_ (&NH.sizeof_hdr, is_BE) != header_size) - throw Exception ("image \"" + H.name() + "\" is not in NIfTI-2 format (sizeof_hdr != " + str(header_size) + ")"); - } - - if (memcmp (NH.magic, "n+2\0", 4) && memcmp (NH.magic, "ni2\0", 4)) - throw Exception ("image \"" + H.name() + "\" is not in NIfTI-2 format (invalid magic signature)"); - - if (memcmp (NH.magic+4, signature_extra, 4)) - WARN ("possible file transfer corruption of file \"" + H.name() + "\" (invalid magic signature)"); - - // data type: - DataType dtype; - switch (Raw::fetch_ (&NH.datatype, is_BE)) { - case DT_BINARY: - dtype = DataType::Bit; - break; - case DT_INT8: - dtype = DataType::Int8; - break; - case DT_UINT8: - dtype = DataType::UInt8; - break; - case DT_INT16: - dtype = DataType::Int16; - break; - case DT_UINT16: - dtype = DataType::UInt16; - break; - case DT_INT32: - dtype = DataType::Int32; - break; - case DT_UINT32: - dtype = DataType::UInt32; - break; - case DT_INT64: - dtype = DataType::Int64; - break; - case DT_UINT64: - dtype = DataType::UInt64; - break; - case DT_FLOAT32: - dtype = DataType::Float32; - break; - case DT_FLOAT64: - dtype = DataType::Float64; - break; - case DT_COMPLEX64: - dtype = DataType::CFloat32; - break; - case DT_COMPLEX128: - dtype = DataType::CFloat64; - break; - default: - throw Exception ("unknown data type for NIfTI-2 image \"" + H.name() + "\""); - } - - if (! 
(dtype.is (DataType::Bit) || dtype.is (DataType::UInt8) || dtype.is (DataType::Int8))) { - if (is_BE) - dtype.set_flag (DataType::BigEndian); - else - dtype.set_flag (DataType::LittleEndian); - } - - H.datatype() = dtype; - - if (Raw::fetch_ (&NH.bitpix, is_BE) != (int16_t) dtype.bits()) - WARN ("bitpix field does not match data type in NIfTI-2 image \"" + H.name() + "\" - ignored"); - - // data set dimensions: - const int64_t ndim = Raw::fetch_ (&NH.dim, is_BE); - if (ndim < 1) - throw Exception ("too few dimensions specified in NIfTI-2 image \"" + H.name() + "\""); - if (ndim > 7) - throw Exception ("too many dimensions specified in NIfTI-2 image \"" + H.name() + "\""); - H.ndim() = ndim; - for (int64_t i = 0; i != ndim; i++) { - H.size(i) = Raw::fetch_ (&NH.dim[i+1], is_BE); - if (H.size (i) < 0) { - INFO ("dimension along axis " + str (i) + " specified as negative in NIfTI-2 image \"" + H.name() + "\" - taking absolute value"); - H.size(i) = abs (H.size (i)); - } - if (!H.size (i)) - H.size(i) = 1; - H.stride(i) = i+1; - } - - // voxel sizes: - for (int i = 0; i < ndim; i++) { - H.spacing(i) = Raw::fetch_ (&NH.pixdim[i+1], is_BE); - if (H.spacing (i) < 0.0) { - INFO ("voxel size along axis " + str (i) + " specified as negative in NIfTI-2 image \"" + H.name() + "\" - taking absolute value"); - H.spacing(i) = abs (H.spacing (i)); - } - } - - const int64_t data_offset = Raw::fetch_ (&NH.vox_offset, is_BE); - - // offset and scale: - H.intensity_scale() = Raw::fetch_ (&NH.scl_slope, is_BE); - if (std::isfinite (H.intensity_scale()) && H.intensity_scale() != 0.0) { - H.intensity_offset() = Raw::fetch_ (&NH.scl_inter, is_BE); - if (!std::isfinite (H.intensity_offset())) - H.intensity_offset() = 0.0; - } else { - H.reset_intensity_scaling(); - } - - char descrip[81]; - strncpy (descrip, NH.descrip, 80); - if (descrip[0]) { - descrip[80] = '\0'; - if (strncmp (descrip, "MRtrix version: ", 16) == 0) - H.keyval()["mrtrix_version"] = descrip+16; - else - add_line (H.keyval()["comments"], descrip); - } - - // Note: Unlike reading from a nifti_1_header class, here we - // don't have to worry about whether or not the file is in - // Analyse format; we can treat it as a NIfTI regardless of - // whether the hedaer & data are in the same file or not. 
- bool sform_code = Raw::fetch_ (&NH.sform_code, is_BE); - if (sform_code) { - auto& M (H.transform().matrix()); - - M(0,0) = Raw::fetch_ (&NH.srow_x[0], is_BE); - M(0,1) = Raw::fetch_ (&NH.srow_x[1], is_BE); - M(0,2) = Raw::fetch_ (&NH.srow_x[2], is_BE); - M(0,3) = Raw::fetch_ (&NH.srow_x[3], is_BE); - - M(1,0) = Raw::fetch_ (&NH.srow_y[0], is_BE); - M(1,1) = Raw::fetch_ (&NH.srow_y[1], is_BE); - M(1,2) = Raw::fetch_ (&NH.srow_y[2], is_BE); - M(1,3) = Raw::fetch_ (&NH.srow_y[3], is_BE); - - M(2,0) = Raw::fetch_ (&NH.srow_z[0], is_BE); - M(2,1) = Raw::fetch_ (&NH.srow_z[1], is_BE); - M(2,2) = Raw::fetch_ (&NH.srow_z[2], is_BE); - M(2,3) = Raw::fetch_ (&NH.srow_z[3], is_BE); - - // check voxel sizes: - for (size_t axis = 0; axis != 3; ++axis) { - if (size_t(ndim) > axis) - if (abs(H.spacing(axis) - M.col(axis).head<3>().norm()) > 1e-4) { - WARN ("voxel spacings inconsistent between NIFTI s-form and header field pixdim"); - break; - } - } - - // normalize each transform axis: - for (size_t axis = 0; axis != 3; ++axis) { - if (size_t(ndim) > axis) - M.col(axis).normalize(); - } - - } - - if (Raw::fetch_ (&NH.qform_code, is_BE)) { - transform_type M_qform; - - Eigen::Quaterniond Q (0.0, Raw::fetch_ (&NH.quatern_b, is_BE), Raw::fetch_ (&NH.quatern_c, is_BE), Raw::fetch_ (&NH.quatern_d, is_BE)); - const double w = 1.0 - Q.squaredNorm(); - if (w < 1.0e-15) - Q.normalize(); - else - Q.w() = std::sqrt (w); - M_qform.matrix().topLeftCorner<3,3>() = Q.matrix(); - - M_qform.translation()[0] = Raw::fetch_ (&NH.qoffset_x, is_BE); - M_qform.translation()[1] = Raw::fetch_ (&NH.qoffset_y, is_BE); - M_qform.translation()[2] = Raw::fetch_ (&NH.qoffset_z, is_BE); - - // qfac: - const float64 qfac = Raw::fetch_ (&NH.pixdim[0], is_BE) >= 0.0 ? 1.0 : -1.0; - if (qfac < 0.0) - M_qform.matrix().col(2) *= qfac; - - if (sform_code) { - Header header2 (H); - header2.transform() = M_qform; - if (!voxel_grids_match_in_scanner_space (H, header2, 0.1)) { - const bool use_sform = File::Config::get_bool ("NIfTIUseSform", false); - WARN ("qform and sform are inconsistent in NIfTI image \"" + H.name() + "\" - using " + (use_sform ? "sform" : "qform")); - if (!use_sform) - H.transform() = M_qform; - } - } - else - H.transform() = M_qform; - } - - if (File::Config::get_bool ("NIfTIAutoLoadJSON", false)) { - std::string json_path = H.name(); - if (Path::has_suffix (json_path, ".nii.gz")) - json_path = json_path.substr (0, json_path.size()-7); - else if (Path::has_suffix (json_path, ".nii")) - json_path = json_path.substr (0, json_path.size()-4); - else - assert (0); - json_path += ".json"; - if (Path::exists (json_path)) - File::JSON::load (H, json_path); - } - - return data_offset; - } - - - - - - - void write (nifti_2_header& NH, const Header& H, const bool single_file) - { - if (H.ndim() > 7) - throw Exception ("NIfTI-2 format cannot support more than 7 dimensions for image \"" + H.name() + "\""); - - bool is_BE = H.datatype().is_big_endian(); - - vector axes; - auto M = File::NIfTI::adjust_transform (H, axes); - - - memset (&NH, 0, sizeof (NH)); - - // magic number: - Raw::store (header_size, &NH.sizeof_hdr, is_BE); - - memcpy (&NH.magic, single_file ? 
"n+2\0" : "ni2\0", 4); - memcpy (&NH.magic+4, signature_extra, 4); - - // data type: - int16_t dt = 0; - switch (H.datatype()()) { - case DataType::Bit: - dt = DT_BINARY; - break; - case DataType::Int8: - dt = DT_INT8; - break; - case DataType::UInt8: - dt = DT_UINT8; - break; - case DataType::Int16LE: - case DataType::Int16BE: - dt = DT_INT16; - break; - case DataType::UInt16LE: - case DataType::UInt16BE: - dt = DT_UINT16; - break; - case DataType::Int32LE: - case DataType::Int32BE: - dt = DT_INT32; - break; - case DataType::UInt32LE: - case DataType::UInt32BE: - dt = DT_UINT32; - break; - case DataType::Int64LE: - case DataType::Int64BE: - dt = DT_INT64; - break; - case DataType::UInt64LE: - case DataType::UInt64BE: - dt = DT_UINT64; - break; - case DataType::Float32LE: - case DataType::Float32BE: - dt = DT_FLOAT32; - break; - case DataType::Float64LE: - case DataType::Float64BE: - dt = DT_FLOAT64; - break; - case DataType::CFloat32LE: - case DataType::CFloat32BE: - dt = DT_COMPLEX64; - break; - case DataType::CFloat64LE: - case DataType::CFloat64BE: - dt = DT_COMPLEX128; - break; - default: - throw Exception ("unknown data type for NIfTI-2 image \"" + H.name() + "\""); - } - Raw::store (dt, &NH.datatype, is_BE); - - Raw::store (H.datatype().bits(), &NH.bitpix, is_BE); - - // data set dimensions: - Raw::store (H.ndim(), &NH.dim[0], is_BE); - { - size_t i = 0; - for (; i < 3; i++) - Raw::store (H.size (axes[i]), &NH.dim[i+1], is_BE); - for (; i < H.ndim(); i++) - Raw::store (H.size (i), &NH.dim[i+1], is_BE); - - // pad out the other dimensions with 1, fix for fslview - ++i; - for (; i < 8; i++) - Raw::store (1, &NH.dim[i], is_BE); - } - - Raw::store (1.0, &NH.pixdim[0], is_BE); - // voxel sizes: - for (size_t i = 0; i < 3; ++i) - Raw::store (H.spacing (axes[i]), &NH.pixdim[i+1], is_BE); - for (size_t i = 3; i < H.ndim(); ++i) - Raw::store (H.spacing (i), &NH.pixdim[i+1], is_BE); - - Raw::store (int64_t(header_with_ext_size), &NH.vox_offset, is_BE); - - // offset and scale: - Raw::store (H.intensity_scale(), &NH.scl_slope, is_BE); - Raw::store (H.intensity_offset(), &NH.scl_inter, is_BE); - - std::string version_string = std::string("MRtrix version: ") + App::mrtrix_version; - if (App::project_version) - version_string += std::string(", project version: ") + App::project_version; - strncpy ( (char*) &NH.descrip, version_string.c_str(), 79); - - Raw::store (NIFTI_XFORM_SCANNER_ANAT, &NH.qform_code, is_BE); - Raw::store (NIFTI_XFORM_SCANNER_ANAT, &NH.sform_code, is_BE); - - // qform: - Eigen::Matrix3d R = M.matrix().topLeftCorner<3,3>(); - if (R.determinant() < 0.0) { - R.col(2) = -R.col(2); - Raw::store (-1.0, &NH.pixdim[0], is_BE); - } - Eigen::Quaterniond Q (R); - - if (Q.w() < 0.0) - Q.vec() = -Q.vec(); - - Raw::store (Q.x(), &NH.quatern_b, is_BE); - Raw::store (Q.y(), &NH.quatern_c, is_BE); - Raw::store (Q.z(), &NH.quatern_d, is_BE); - - Raw::store (M(0,3), &NH.qoffset_x, is_BE); - Raw::store (M(1,3), &NH.qoffset_y, is_BE); - Raw::store (M(2,3), &NH.qoffset_z, is_BE); - - // sform: - Raw::store (H.spacing (axes[0]) * M(0,0), &NH.srow_x[0], is_BE); - Raw::store (H.spacing (axes[1]) * M(0,1), &NH.srow_x[1], is_BE); - Raw::store (H.spacing (axes[2]) * M(0,2), &NH.srow_x[2], is_BE); - Raw::store (M (0,3), &NH.srow_x[3], is_BE); - - Raw::store (H.spacing (axes[0]) * M(1,0), &NH.srow_y[0], is_BE); - Raw::store (H.spacing (axes[1]) * M(1,1), &NH.srow_y[1], is_BE); - Raw::store (H.spacing (axes[2]) * M(1,2), &NH.srow_y[2], is_BE); - Raw::store (M (1,3), &NH.srow_y[3], is_BE); - - Raw::store 
(H.spacing (axes[0]) * M(2,0), &NH.srow_z[0], is_BE); - Raw::store (H.spacing (axes[1]) * M(2,1), &NH.srow_z[1], is_BE); - Raw::store (H.spacing (axes[2]) * M(2,2), &NH.srow_z[2], is_BE); - Raw::store (M (2,3), &NH.srow_z[3], is_BE); - - const char xyzt_units[4] { NIFTI_UNITS_MM, NIFTI_UNITS_MM, NIFTI_UNITS_MM, NIFTI_UNITS_SEC }; - const int32_t* const xyzt_units_as_int_ptr = reinterpret_cast(xyzt_units); - Raw::store (*xyzt_units_as_int_ptr, &NH.xyzt_units, is_BE); - - if (File::Config::get_bool ("NIfTI.AutoSaveJSON", false)) { - std::string json_path = H.name(); - if (Path::has_suffix (json_path, ".nii.gz")) - json_path = json_path.substr (0, json_path.size()-7); - else if (Path::has_suffix (json_path, ".nii")) - json_path = json_path.substr (0, json_path.size()-4); - else - assert (0); - json_path += ".json"; - File::JSON::save (H, json_path); - } - } - - - - } - } -} - diff --git a/core/file/nifti_utils.cpp b/core/file/nifti_utils.cpp index 80fe4a1ab4..632b71655d 100644 --- a/core/file/nifti_utils.cpp +++ b/core/file/nifti_utils.cpp @@ -1,22 +1,29 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. 
*/ - #include "header.h" #include "raw.h" +#include "file/path.h" +#include "file/utils.h" #include "file/config.h" #include "file/nifti_utils.h" +#include "file/json_utils.h" +#include "file/gz.h" +#include "image_io/default.h" +#include "image_io/gz.h" namespace MR { @@ -25,11 +32,560 @@ namespace MR namespace NIfTI { + namespace { + template struct Type { NOMEMALIGN + using code_type = int16_t; + using float_type = float32; + using dim_type = int16_t; + using vox_offset_type = float32; + static constexpr bool is_version2 = false; + static const char* signature_extra() { return "\0\0\0\0"; } + static const char* magic1() { return "n+1\0"; } + static const char* magic2() { return "ni1\0"; } + static const std::string version() { return "NIFTI-1.1"; } + static const char* db_name (const NiftiHeader& NH) { return NH.db_name; } + static char* db_name (NiftiHeader& NH) { return NH.db_name; } + static int* extents (NiftiHeader& NH) { return &NH.extents; } + static char* regular (NiftiHeader& NH) { return &NH.regular; } + }; + + template <> struct Type { NOMEMALIGN + using code_type = int32_t; + using float_type = float64; + using dim_type = int64_t; + using vox_offset_type = int64_t; + static constexpr bool is_version2 = true; + static const char* signature_extra() { return "\r\n\032\n"; } + static const char* magic1() { return "n+2\0"; } + static const char* magic2() { return "ni2\0"; } + static const std::string version() { return "NIFTI-2"; } + static const char* db_name (const nifti_2_header& NH) { return nullptr; } + static char* db_name (nifti_2_header& NH) { return nullptr; } + static int* extents (nifti_2_header& NH) { return nullptr; } + static char* regular (nifti_2_header& NH) { return nullptr; } + }; + + const vector suffixes { ".nii", ".img" }; + } + bool right_left_warning_issued = false; + + template + size_t fetch (Header& H, const NiftiHeader& NH) + { + const std::string& version = Type::version(); + using dim_type = typename Type::dim_type; + using code_type = typename Type::code_type; + using float_type = typename Type::float_type; + + bool is_BE = false; + if (Raw::fetch_ (&NH.sizeof_hdr, is_BE) != sizeof(NH)) { + is_BE = true; + if (Raw::fetch_ (&NH.sizeof_hdr, is_BE) != sizeof(NH)) + throw Exception ("image \"" + H.name() + "\" is not in " + version + + " format (sizeof_hdr != " + str(sizeof(NH)) + ")"); + } + + bool is_nifti = true; + if (memcmp (NH.magic, Type::magic1(), 4) && memcmp (NH.magic, Type::magic2(), 4)) { + if (Type::is_version2) { + throw Exception ("image \"" + H.name() + "\" is not in " + version + " format (invalid magic signature)"); + } + else { + is_nifti = false; + DEBUG ("assuming image \"" + H.name() + "\" is in AnalyseAVW format."); + } + } + + if (Type::is_version2) { + if (memcmp (NH.magic+4, Type::signature_extra(), 4)) + WARN ("possible file transfer corruption of file \"" + H.name() + "\" (invalid magic signature)"); + } + else{ + char db_name[19]; + strncpy (db_name, Type::db_name (NH), 18); + if (db_name[0]) { + db_name[18] = '\0'; + add_line (H.keyval()["comments"], db_name); + } + } + + + + DataType dtype; + switch (Raw::fetch_ (&NH.datatype, is_BE)) { + case DT_BINARY: + dtype = DataType::Bit; + break; + case DT_INT8: + dtype = DataType::Int8; + break; + case DT_UINT8: + dtype = DataType::UInt8; + break; + case DT_INT16: + dtype = DataType::Int16; + break; + case DT_UINT16: + dtype = DataType::UInt16; + break; + case DT_INT32: + dtype = DataType::Int32; + break; + case DT_UINT32: + dtype = DataType::UInt32; + break; + case DT_INT64: + 
dtype = DataType::Int64; + break; + case DT_UINT64: + dtype = DataType::UInt64; + break; + case DT_FLOAT32: + dtype = DataType::Float32; + break; + case DT_FLOAT64: + dtype = DataType::Float64; + break; + case DT_COMPLEX64: + dtype = DataType::CFloat32; + break; + case DT_COMPLEX128: + dtype = DataType::CFloat64; + break; + default: + throw Exception ("unknown data type for " + version + " image \"" + H.name() + "\""); + } + + if (! (dtype.is (DataType::Bit) || dtype.is (DataType::UInt8) || dtype.is (DataType::Int8))) { + if (is_BE) + dtype.set_flag (DataType::BigEndian); + else + dtype.set_flag (DataType::LittleEndian); + } + + if (Raw::fetch_ (&NH.bitpix, is_BE) != (int16_t) dtype.bits()) + WARN ("bitpix field does not match data type in " + version + " image \"" + H.name() + "\" - ignored"); + + H.datatype() = dtype; + + + + + const int ndim = Raw::fetch_ (&NH.dim, is_BE); + if (ndim < 1) + throw Exception ("too few dimensions specified in NIfTI image \"" + H.name() + "\""); + if (ndim > 7) + throw Exception ("too many dimensions specified in NIfTI image \"" + H.name() + "\""); + H.ndim() = ndim; + for (int i = 0; i != ndim; i++) { + H.size(i) = Raw::fetch_ (&NH.dim[i+1], is_BE); + if (H.size (i) < 0) { + INFO ("dimension along axis " + str (i) + " specified as negative in NIfTI image \"" + H.name() + "\" - taking absolute value"); + H.size(i) = abs (H.size (i)); + } + if (!H.size (i)) + H.size(i) = 1; + H.stride(i) = i+1; + } + + // voxel sizes: + double pixdim[8]; + for (int i = 0; i < ndim; i++) { + pixdim[i] = Raw::fetch_ (&NH.pixdim[i+1], is_BE); + if (pixdim[i] < 0.0) { + INFO ("voxel size along axis " + str (i) + " specified as negative in NIfTI image \"" + H.name() + "\" - taking absolute value"); + pixdim[i] = abs (pixdim[i]); + } + H.spacing(i) = pixdim[i]; + } + + + // offset and scale: + H.intensity_scale() = Raw::fetch_ (&NH.scl_slope, is_BE); + if (std::isfinite (H.intensity_scale()) && H.intensity_scale() != 0.0) { + H.intensity_offset() = Raw::fetch_ (&NH.scl_inter, is_BE); + if (!std::isfinite (H.intensity_offset())) + H.intensity_offset() = 0.0; + } else { + H.reset_intensity_scaling(); + } + + + + const int64_t data_offset = Raw::fetch_ (&NH.vox_offset, is_BE); + + + + char descrip[81]; + strncpy (descrip, NH.descrip, 80); + if (descrip[0]) { + descrip[80] = '\0'; + if (strncmp (descrip, "MRtrix version: ", 16) == 0) + H.keyval()["mrtrix_version"] = descrip+16; + else + add_line (H.keyval()["comments"], descrip); + } + + // used to rescale voxel sizes in case of mismatch between pixdim and + // length of cosine vectors in sform: + bool rescale_voxel_sizes = false; + + + if (is_nifti) { + bool sform_code = Raw::fetch_ (&NH.sform_code, is_BE); + bool qform_code = Raw::fetch_ (&NH.qform_code, is_BE); + + if (sform_code) { + auto& M (H.transform().matrix()); + + M(0,0) = Raw::fetch_ (&NH.srow_x[0], is_BE); + M(0,1) = Raw::fetch_ (&NH.srow_x[1], is_BE); + M(0,2) = Raw::fetch_ (&NH.srow_x[2], is_BE); + M(0,3) = Raw::fetch_ (&NH.srow_x[3], is_BE); + + M(1,0) = Raw::fetch_ (&NH.srow_y[0], is_BE); + M(1,1) = Raw::fetch_ (&NH.srow_y[1], is_BE); + M(1,2) = Raw::fetch_ (&NH.srow_y[2], is_BE); + M(1,3) = Raw::fetch_ (&NH.srow_y[3], is_BE); + + M(2,0) = Raw::fetch_ (&NH.srow_z[0], is_BE); + M(2,1) = Raw::fetch_ (&NH.srow_z[1], is_BE); + M(2,2) = Raw::fetch_ (&NH.srow_z[2], is_BE); + M(2,3) = Raw::fetch_ (&NH.srow_z[3], is_BE); + + // check voxel sizes: + for (size_t axis = 0; axis != 3; ++axis) { + if (size_t(H.ndim()) > axis) { + if (abs(pixdim[axis]/M.col(axis).head<3>().norm() - 
1.0) > 1e-5) { + WARN ("voxel spacings inconsistent between NIFTI s-form and header field pixdim"); + rescale_voxel_sizes = true; + break; + } + } + } + + // normalize each transform axis and rescale voxel sizes if + // needed: + for (size_t axis = 0; axis != 3; ++axis) { + if (size_t(H.ndim()) > axis) { + auto length = M.col(axis).head<3>().norm(); + M.col(axis).head<3>() /= length; + H.spacing(axis) = rescale_voxel_sizes ? length : pixdim[axis]; + } + } + + } + + if (qform_code) { + transform_type M_qform; + + Eigen::Quaterniond Q (0.0, + Raw::fetch_ (&NH.quatern_b, is_BE), + Raw::fetch_ (&NH.quatern_c, is_BE), + Raw::fetch_ (&NH.quatern_d, is_BE)); + const double w = 1.0 - Q.squaredNorm(); + if (w < 1.0e-7) + Q.normalize(); + else + Q.w() = std::sqrt (w); + M_qform.matrix().topLeftCorner<3,3>() = Q.matrix(); + + M_qform.translation()[0] = Raw::fetch_ (&NH.qoffset_x, is_BE); + M_qform.translation()[1] = Raw::fetch_ (&NH.qoffset_y, is_BE); + M_qform.translation()[2] = Raw::fetch_ (&NH.qoffset_z, is_BE); + + // qfac: + float qfac = Raw::fetch_ (&NH.pixdim[0], is_BE) >= 0.0 ? 1.0 : -1.0; + if (qfac < 0.0) + M_qform.matrix().col(2) *= qfac; + + //CONF option: NIfTIUseSform + //CONF default: 1 (true) + //CONF A boolean value to control whether, in cases where both + //CONF the sform and qform transformations are defined in an + //CONF input NIfTI image, but those transformations differ, the + //CONF sform transformation should be used in preference to the + //CONF qform matrix. The default is to use the sform matrix; + //CONF set to 0 / false to override and instead use the qform. + const bool use_sform = File::Config::get_bool ("NIfTIUseSform", true); + + if (sform_code) { + Header header2 (H); + header2.transform() = M_qform; + if (!voxel_grids_match_in_scanner_space (H, header2, 0.1)) + WARN ("qform and sform are inconsistent in NIfTI image \"" + H.name() + + "\" - using " + (use_sform ? "sform" : "qform")); + } + + if (!sform_code || !use_sform) { + H.transform() = M_qform; + H.spacing(0) = pixdim[0]; + H.spacing(1) = pixdim[1]; + H.spacing(2) = pixdim[2]; + } + } + + + + //CONF option: NIfTIAutoLoadJSON + //CONF default: 0 (false) + //CONF A boolean value to indicate whether, when opening NIfTI images, + //CONF any corresponding JSON file should be automatically loaded. + if (File::Config::get_bool ("NIfTIAutoLoadJSON", false)) { + std::string json_path = H.name(); + if (Path::has_suffix (json_path, ".nii.gz")) + json_path = json_path.substr (0, json_path.size()-7); + else if (Path::has_suffix (json_path, ".nii")) + json_path = json_path.substr (0, json_path.size()-4); + else + assert (0); + json_path += ".json"; + if (Path::exists (json_path)) + File::JSON::load (H, json_path); + } + } + else { + H.transform()(0,0) = std::numeric_limits::quiet_NaN(); + //CONF option: AnalyseLeftToRight + //CONF default: 0 (false) + //CONF A boolean value to indicate whether images in Analyse format + //CONF should be assumed to be in LAS orientation (default) or RAS + //CONF (when this is option is turned on). + if (!File::Config::get_bool ("AnalyseLeftToRight", false)) + H.stride(0) = -H.stride (0); + if (!File::NIfTI::right_left_warning_issued) { + INFO ("assuming Analyse images are encoded " + std::string (H.stride (0) > 0 ? 
"left to right" : "right to left")); + File::NIfTI::right_left_warning_issued = true; + } + } + + return data_offset; + } + + + + + + + template + void store (NiftiHeader& NH, const Header& H, const bool single_file) + { + const std::string& version = Type::version(); + using dim_type = typename Type::dim_type; + using vox_offset_type = typename Type::vox_offset_type; + using code_type = typename Type::code_type; + using float_type = typename Type::float_type; + + if (H.ndim() > 7) + throw Exception (version + " format cannot support more than 7 dimensions for image \"" + H.name() + "\""); + + bool is_BE = H.datatype().is_big_endian(); + + vector axes; + auto M = File::NIfTI::adjust_transform (H, axes); + + + memset (&NH, 0, sizeof (NH)); + + // magic number: + Raw::store (sizeof(NH), &NH.sizeof_hdr, is_BE); + + memcpy ( (char*) &NH.magic, single_file ? Type::magic1() : Type::magic2(), 4); + if (Type::is_version2) + memcpy ( (char*) &NH.magic+4, Type::signature_extra(), 4); + + if (!Type::is_version2) { + const auto hit = H.keyval().find("comments"); + auto comments = split_lines (hit == H.keyval().end() ? std::string() : hit->second); + strncpy ( Type::db_name (NH), comments.size() ? comments[0].c_str() : "untitled\0\0\0\0\0\0\0\0\0\0\0", 17); + Type::db_name (NH)[17] = '\0'; + Raw::store (16384, Type::extents (NH), is_BE); + *Type::regular(NH) = 'r'; + } + NH.dim_info = 0; + + // data type: + int16_t dt = 0; + switch (H.datatype()()) { + case DataType::Bit: + dt = DT_BINARY; + break; + case DataType::Int8: + dt = DT_INT8; + break; + case DataType::UInt8: + dt = DT_UINT8; + break; + case DataType::Int16LE: + case DataType::Int16BE: + dt = DT_INT16; + break; + case DataType::UInt16LE: + case DataType::UInt16BE: + dt = DT_UINT16; + break; + case DataType::Int32LE: + case DataType::Int32BE: + dt = DT_INT32; + break; + case DataType::UInt32LE: + case DataType::UInt32BE: + dt = DT_UINT32; + break; + case DataType::Int64LE: + case DataType::Int64BE: + dt = DT_INT64; + break; + case DataType::UInt64LE: + case DataType::UInt64BE: + dt = DT_UINT64; + break; + case DataType::Float32LE: + case DataType::Float32BE: + dt = DT_FLOAT32; + break; + case DataType::Float64LE: + case DataType::Float64BE: + dt = DT_FLOAT64; + break; + case DataType::CFloat32LE: + case DataType::CFloat32BE: + dt = DT_COMPLEX64; + break; + case DataType::CFloat64LE: + case DataType::CFloat64BE: + dt = DT_COMPLEX128; + break; + default: + throw Exception ("unknown data type for " + version + " image \"" + H.name() + "\""); + } + Raw::store (dt, &NH.datatype, is_BE); + + Raw::store (H.datatype().bits(), &NH.bitpix, is_BE); + + // data set dimensions: + Raw::store (H.ndim(), &NH.dim[0], is_BE); + { + size_t i = 0; + for (; i < 3; i++) + Raw::store (H.size (axes[i]), &NH.dim[i+1], is_BE); + for (; i < H.ndim(); i++) + Raw::store (H.size (i), &NH.dim[i+1], is_BE); + + // pad out the other dimensions with 1, fix for fslview + ++i; + for (; i < 8; i++) + Raw::store (1, &NH.dim[i], is_BE); + } + + Raw::store (1.0, &NH.pixdim[0], is_BE); + // voxel sizes: + for (size_t i = 0; i < 3; ++i) + Raw::store (H.spacing (axes[i]), &NH.pixdim[i+1], is_BE); + for (size_t i = 3; i < H.ndim(); ++i) + Raw::store (H.spacing (i), &NH.pixdim[i+1], is_BE); + + Raw::store (sizeof(NH)+4, &NH.vox_offset, is_BE); + + // offset and scale: + Raw::store (H.intensity_scale(), &NH.scl_slope, is_BE); + Raw::store (H.intensity_offset(), &NH.scl_inter, is_BE); + + std::string version_string = std::string("MRtrix version: ") + App::mrtrix_version; + if 
(App::project_version) + version_string += std::string(", project version: ") + App::project_version; + strncpy ( (char*) &NH.descrip, version_string.c_str(), 79); + + + // qform: + Eigen::Matrix3d R = M.matrix().topLeftCorner<3,3>(); + float qfac = 1.0; + if (R.determinant() < 0.0) { + R.col(2) = -R.col(2); + qfac = -1.0; + } + Eigen::Quaterniond Q (R); + + if (Q.w() < 0.0) + Q.vec() = -Q.vec(); + + if (R.isApprox (Q.matrix(), 1e-6)) { + Raw::store (NIFTI_XFORM_SCANNER_ANAT, &NH.qform_code, is_BE); + Raw::store (qfac, &NH.pixdim[0], is_BE); + + Raw::store (Q.x(), &NH.quatern_b, is_BE); + Raw::store (Q.y(), &NH.quatern_c, is_BE); + Raw::store (Q.z(), &NH.quatern_d, is_BE); + + Raw::store (M(0,3), &NH.qoffset_x, is_BE); + Raw::store (M(1,3), &NH.qoffset_y, is_BE); + Raw::store (M(2,3), &NH.qoffset_z, is_BE); + } + else { + WARN ("image \"" + H.name() + "\" contains non-rigid transform - qform will not be stored."); + Raw::store (NIFTI_XFORM_UNKNOWN, &NH.qform_code, is_BE); + } + + // sform: + Raw::store (NIFTI_XFORM_SCANNER_ANAT, &NH.sform_code, is_BE); + + Raw::store (H.spacing (axes[0]) * M(0,0), &NH.srow_x[0], is_BE); + Raw::store (H.spacing (axes[1]) * M(0,1), &NH.srow_x[1], is_BE); + Raw::store (H.spacing (axes[2]) * M(0,2), &NH.srow_x[2], is_BE); + Raw::store (M (0,3), &NH.srow_x[3], is_BE); + + Raw::store (H.spacing (axes[0]) * M(1,0), &NH.srow_y[0], is_BE); + Raw::store (H.spacing (axes[1]) * M(1,1), &NH.srow_y[1], is_BE); + Raw::store (H.spacing (axes[2]) * M(1,2), &NH.srow_y[2], is_BE); + Raw::store (M (1,3), &NH.srow_y[3], is_BE); + + Raw::store (H.spacing (axes[0]) * M(2,0), &NH.srow_z[0], is_BE); + Raw::store (H.spacing (axes[1]) * M(2,1), &NH.srow_z[1], is_BE); + Raw::store (H.spacing (axes[2]) * M(2,2), &NH.srow_z[2], is_BE); + Raw::store (M (2,3), &NH.srow_z[3], is_BE); + + if (Type::is_version2) { + const char xyzt_units[4] { NIFTI_UNITS_MM, NIFTI_UNITS_MM, NIFTI_UNITS_MM, NIFTI_UNITS_SEC }; + const int32_t* const xyzt_units_as_int_ptr = reinterpret_cast(xyzt_units); + Raw::store (*xyzt_units_as_int_ptr, &NH.xyzt_units, is_BE); + } + else + NH.xyzt_units = SPACE_TIME_TO_XYZT (NIFTI_UNITS_MM, NIFTI_UNITS_SEC); + + //CONF option: NIfTIAutoSaveJSON + //CONF default: 0 (false) + //CONF A boolean value to indicate whether, when writing NIfTI images, + //CONF a corresponding JSON file should be automatically created in order + //CONF to save any header entries that cannot be stored in the NIfTI + //CONF header. + if (single_file && File::Config::get_bool ("NIfTIAutoSaveJSON", false)) { + std::string json_path = H.name(); + if (Path::has_suffix (json_path, ".nii.gz")) + json_path = json_path.substr (0, json_path.size()-7); + else if (Path::has_suffix (json_path, ".nii")) + json_path = json_path.substr (0, json_path.size()-4); + else + assert (0); + json_path += ".json"; + File::JSON::save (H, json_path, H.name()); + } + } + + + + + + + + + + + transform_type adjust_transform (const Header& H, vector& axes) { Stride::List strides = Stride::get (H); @@ -63,10 +619,23 @@ namespace MR + bool check (int VERSION, Header& H, const size_t num_axes, const vector& suffixes) + { + if (!Path::has_suffix (H.name(), suffixes)) + return false; + if (version (H) != VERSION) + return false; + + std::string format = VERSION == 1 ? 
Type::version() : Type::version(); + + if (num_axes < 3) + throw Exception ("cannot create " + format + " image with less than 3 dimensions"); + if (num_axes > 7) + throw Exception ("cannot create " + format + " image with more than 7 dimensions"); + + H.ndim() = num_axes; - void check (Header& H, const bool is_analyse) - { for (size_t i = 0; i < H.ndim(); ++i) if (H.size (i) < 1) H.size(i) = 1; @@ -81,20 +650,6 @@ namespace MR H.stride(n) += H.stride(n) > 0 ? max_spatial_stride : -max_spatial_stride; Stride::symbolise (H); - // if .img, reset all strides to defaults, since it can't be assumed - // that downstream software will be able to parse the NIfTI transform - if (is_analyse) { - for (size_t i = 0; i < H.ndim(); ++i) - H.stride(i) = i+1; - bool analyse_left_to_right = File::Config::get_bool ("Analyse.LeftToRight", false); - if (analyse_left_to_right) - H.stride(0) = -H.stride (0); - - if (!right_left_warning_issued) { - INFO ("assuming Analyse images are encoded " + std::string (analyse_left_to_right ? "left to right" : "right to left")); - right_left_warning_issued = true; - } - } // by default, prevent output of bitwise data in NIfTI, since most 3rd // party software package can't handle them @@ -105,17 +660,149 @@ namespace MR //CONF data is permitted (most 3rd party software packages don't //CONF support bitwise data). If false (the default), data will be //CONF stored using more widely supported unsigned 8-bit integers. - if (H.datatype() == DataType::Bit) + if (H.datatype() == DataType::Bit) if (!File::Config::get_bool ("NIfTIAllowBitwise", false)) H.datatype() = DataType::UInt8; + + return true; + } + + + + namespace { + template struct Get { NOMEMALIGN using type = nifti_1_header; }; + template <> struct Get<2> { NOMEMALIGN using type = nifti_2_header; }; } + template + std::unique_ptr read (Header& H) + { + using nifti_header = typename Get::type; + + if (!Path::has_suffix (H.name(), ".nii") && !Path::has_suffix (H.name(), ".img")) + return std::unique_ptr(); + + const bool single_file = Path::has_suffix (H.name(), ".nii"); + const std::string header_path = single_file ? H.name() : H.name().substr (0, H.name().size()-4) + ".hdr"; + + try { + File::MMap fmap (header_path); + const size_t data_offset = fetch (H, * ( (const nifti_header*) fmap.address())); + std::unique_ptr handler (new ImageIO::Default (H)); + handler->files.push_back (File::Entry (H.name(), ( single_file ? data_offset : 0 ))); + return std::move (handler); + } catch (...) { + return std::unique_ptr(); + } + } + - size_t version (Header& H) + template + std::unique_ptr read_gz (Header& H) + { + using nifti_header = typename Get::type; + + if (!Path::has_suffix (H.name(), ".nii.gz")) + return std::unique_ptr(); + + nifti_header NH; + File::GZ zf (H.name(), "rb"); + zf.read (reinterpret_cast (&NH), sizeof(NH)); + zf.close(); + + try { + const size_t data_offset = fetch (H, NH); + std::unique_ptr io_handler (new ImageIO::GZ (H, data_offset)); + memcpy (io_handler.get()->header(), &NH, sizeof(NH)); + memset (io_handler.get()->header() + sizeof(NH), 0, sizeof(nifti1_extender)); + io_handler->files.push_back (File::Entry (H.name(), data_offset)); + return std::move (io_handler); + } catch (...) 
{ + return std::unique_ptr(); + } + } + + + + + template + std::unique_ptr create (Header& H) + { + using nifti_header = typename Get::type; + const std::string& version = Type::version(); + + if (H.ndim() > 7) + throw Exception (version + " format cannot support more than 7 dimensions for image \"" + H.name() + "\""); + + const bool single_file = Path::has_suffix (H.name(), ".nii"); + const std::string header_path = single_file ? H.name() : H.name().substr (0, H.name().size()-4) + ".hdr"; + + nifti_header NH; + store (NH, H, single_file); + File::OFStream out (header_path, std::ios::out | std::ios::binary); + out.write ( (char*) &NH, sizeof (nifti_header)); + nifti1_extender extender; + memset (extender.extension, 0x00, sizeof (nifti1_extender)); + out.write (extender.extension, sizeof (nifti1_extender)); + out.close(); + + const size_t data_offset = single_file ? sizeof(NH)+4 : 0; + + if (single_file) + File::resize (H.name(), data_offset + footprint(H)); + else + File::create (H.name(), footprint(H)); + + std::unique_ptr handler (new ImageIO::Default (H)); + handler->files.push_back (File::Entry (H.name(), data_offset)); + + return std::move (handler); + } + + + + + + template + std::unique_ptr create_gz (Header& H) + { + using nifti_header = typename Get::type; + const std::string& version = Type::version(); + + if (H.ndim() > 7) + throw Exception (version + " format cannot support more than 7 dimensions for image \"" + H.name() + "\""); + + std::unique_ptr io_handler (new ImageIO::GZ (H, sizeof(nifti_header)+4)); + nifti_header& NH = *reinterpret_cast (io_handler->header()); + + store (NH, H, true); + memset (io_handler->header()+sizeof(nifti_header), 0, sizeof(nifti1_extender)); + + File::create (H.name()); + io_handler->files.push_back (File::Entry (H.name(), sizeof(nifti_header)+4)); + + return std::move (io_handler); + } + + + + // force explicit instantiation: + template std::unique_ptr read<1> (Header& H); + template std::unique_ptr read<2> (Header& H); + template std::unique_ptr read_gz<1> (Header& H); + template std::unique_ptr read_gz<2> (Header& H); + template std::unique_ptr create<1> (Header& H); + template std::unique_ptr create<2> (Header& H); + template std::unique_ptr create_gz<1> (Header& H); + template std::unique_ptr create_gz<2> (Header& H); + + + + int version (Header& H) { //CONF option: NIfTIAlwaysUseVer2 //CONF default: 0 (false) @@ -140,6 +827,17 @@ namespace MR } + std::string get_json_path (const std::string & nifti_path) { + std::string json_path; + if (Path::has_suffix (nifti_path, ".nii.gz")) + json_path = nifti_path.substr (0, nifti_path.size()-7); + else if (Path::has_suffix (nifti_path, ".nii")) + json_path = nifti_path.substr (0, nifti_path.size()-4); + else + assert (0); + return json_path + ".json"; + } + } } diff --git a/core/file/nifti_utils.h b/core/file/nifti_utils.h index 91eb7e9d10..debaafc279 100644 --- a/core/file/nifti_utils.h +++ b/core/file/nifti_utils.h @@ -1,22 +1,28 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_nifti_utils_h__ #define __file_nifti_utils_h__ #include "types.h" +#include "raw.h" +#include "header.h" +#include "file/config.h" +#include "file/nifti1.h" +#include "file/nifti2.h" namespace MR { @@ -26,13 +32,20 @@ namespace MR { namespace NIfTI { - extern bool right_left_warning_issued; transform_type adjust_transform (const Header& H, vector& order); - void check (Header& H, const bool is_analyse); - size_t version (Header& H); + bool check (int VERSION, Header& H, const size_t num_axes, const vector& suffixes); + + template std::unique_ptr read (Header& H); + template std::unique_ptr read_gz (Header& H); + + template std::unique_ptr create (Header& H); + template std::unique_ptr create_gz (Header& H); + + int version (Header& H); + std::string get_json_path (const std::string & nifti_path); } } diff --git a/core/file/ofstream.cpp b/core/file/ofstream.cpp index 5486e3fd7f..5668fc5f90 100644 --- a/core/file/ofstream.cpp +++ b/core/file/ofstream.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #include "file/ofstream.h" #include "file/utils.h" diff --git a/core/file/ofstream.h b/core/file/ofstream.h index 4eab3940d3..ce6eaaaf03 100644 --- a/core/file/ofstream.h +++ b/core/file/ofstream.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
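The nifti_utils.h declarations above expose NIfTI-1 and NIfTI-2 support through functions templated on the format version: a small trait maps the version number onto the matching header struct, and the definitions are instantiated explicitly for versions 1 and 2. A minimal self-contained sketch of that pattern (HandlerBase and read_sketch() are placeholders for the library's image-I/O handler types, and the spelled-out template parameters are reconstructions):

#include <iostream>
#include <memory>

// Stand-in header structs; 348 and 540 bytes are the nominal on-disk sizes
// of the NIfTI-1 and NIfTI-2 headers respectively.
struct nifti_1_header { char bytes[348]; };
struct nifti_2_header { char bytes[540]; };

// Trait mapping the format version onto the corresponding header struct,
// mirroring the Get helper in nifti_utils.cpp above.
template <int version> struct Get    { using type = nifti_1_header; };
template <>            struct Get<2> { using type = nifti_2_header; };

struct HandlerBase { virtual ~HandlerBase() = default; };   // placeholder for the I/O handler base class

template <int version>
std::unique_ptr<HandlerBase> read_sketch ()
{
  using nifti_header = typename Get<version>::type;
  std::cout << "version " << version << ": header is " << sizeof (nifti_header) << " bytes\n";
  return std::make_unique<HandlerBase>();
}

// Explicit instantiation for the two supported versions, as done above:
template std::unique_ptr<HandlerBase> read_sketch<1> ();
template std::unique_ptr<HandlerBase> read_sketch<2> ();

int main () { read_sketch<1>(); read_sketch<2>(); }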
+ * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_ofstream_h__ #define __file_ofstream_h__ @@ -38,11 +39,11 @@ namespace MR class OFStream : public std::ofstream { NOMEMALIGN public: OFStream() { } - OFStream (const std::string& path, const std::ios_base::openmode mode = std::ios_base::out) { + OFStream (const std::string& path, const std::ios_base::openmode mode = std::ios_base::out | std::ios_base::binary) { open (path, mode); } - void open (const std::string& path, const std::ios_base::openmode mode = std::ios_base::out); + void open (const std::string& path, const std::ios_base::openmode mode = std::ios_base::out | std::ios_base::binary); }; diff --git a/core/file/path.h b/core/file/path.h index cd62ecdb64..cab77fd6ce 100644 --- a/core/file/path.h +++ b/core/file/path.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_path_h__ #define __file_path_h__ @@ -24,15 +25,30 @@ #include #include #include +#include -#include "types.h" #include "exception.h" +#include "mrtrix.h" +#include "types.h" #define HOME_ENV "HOME" + +/*! \def PATH_SEPARATORS + * \brief symbols used for separating directories in filesystem paths + * + * The PATH_SEPARATORS macro contains all characters that may be used + * to delimit directory / file names in a filesystem path. On + * POSIX-compliant systems, this is simply the forward-slash character + * '/'; on Windows however, either forward-slashes or back-slashes + * can appear. Therefore any code that performs such direct + * manipulation of filesystem paths should both use this macro, and + * be written accounting for the possibility of this string containing + * either one or two characters depending on the target system. 
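The macro documented above is used immediately below by Path::basename(), dirname() and join(); the following stand-alone sketch shows the same find_last_of()-based splitting with the two-character Windows definition (basename_sketch() and the example paths are illustrative only):

#include <iostream>
#include <string>

#define PATH_SEPARATORS "/\\"   // Windows definition: either separator may appear

// Return everything after the last separator, as Path::basename() does.
std::string basename_sketch (const std::string& name) {
  const size_t i = name.find_last_of (PATH_SEPARATORS);
  return i == std::string::npos ? name : name.substr (i + 1);
}

int main () {
  std::cout << basename_sketch ("data/sub-01/dwi.mif") << "\n";        // dwi.mif
  std::cout << basename_sketch ("C:\\data\\sub-01\\dwi.mif") << "\n";  // dwi.mif
}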
*/ #ifdef MRTRIX_WINDOWS -#define PATH_SEPARATOR "\\/" +// Preferentially use forward-slash when inserting via PATH_SEPARATORS[0] +#define PATH_SEPARATORS "/\\" #else -#define PATH_SEPARATOR "/" +#define PATH_SEPARATORS "/" #endif @@ -43,28 +59,28 @@ namespace MR inline std::string basename (const std::string& name) { - size_t i = name.find_last_of (PATH_SEPARATOR); + size_t i = name.find_last_of (PATH_SEPARATORS); return (i == std::string::npos ? name : name.substr (i+1)); } inline std::string dirname (const std::string& name) { - size_t i = name.find_last_of (PATH_SEPARATOR); - return (i == std::string::npos ? std::string ("") : (i ? name.substr (0,i) : std::string (PATH_SEPARATOR))); + size_t i = name.find_last_of (PATH_SEPARATORS); + return (i == std::string::npos ? std::string ("") : (i ? name.substr (0,i) : std::string(1, PATH_SEPARATORS[0]))); } inline std::string join (const std::string& first, const std::string& second) { - if (first.empty()) + if (first.empty()) return second; - if (first[first.size()-1] != PATH_SEPARATOR[0] + if (first[first.size()-1] != PATH_SEPARATORS[0] #ifdef MRTRIX_WINDOWS - && first[first.size()-1] != PATH_SEPARATOR[1] + && first[first.size()-1] != PATH_SEPARATORS[1] #endif - ) - return first + PATH_SEPARATOR[0] + second; + ) + return first + PATH_SEPARATORS[0] + second; return first + second; } @@ -72,7 +88,13 @@ namespace MR inline bool exists (const std::string& path) { struct stat buf; - if (!stat (path.c_str(), &buf)) return true; +#ifdef MRTRIX_WINDOWS + const std::string stripped (strip (path, PATH_SEPARATORS, false, true)); + if (!stat (stripped.c_str(), &buf)) +#else + if (!stat (path.c_str(), &buf)) +#endif + return true; if (errno == ENOENT) return false; throw Exception (strerror (errno)); return false; @@ -82,7 +104,13 @@ namespace MR inline bool is_dir (const std::string& path) { struct stat buf; - if (!stat (path.c_str(), &buf)) return S_ISDIR (buf.st_mode); +#ifdef MRTRIX_WINDOWS + const std::string stripped (strip (path, PATH_SEPARATORS, false, true)); + if (!stat (stripped.c_str(), &buf)) +#else + if (!stat (path.c_str(), &buf)) +#endif + return S_ISDIR (buf.st_mode); if (errno == ENOENT) return false; throw Exception (strerror (errno)); return false; @@ -98,20 +126,31 @@ namespace MR return false; } + inline bool has_suffix (const std::string& name, const std::string& suffix) { - return (name.size() < suffix.size() ? 
- false : - name.substr (name.size()-suffix.size()) == suffix); + return name.size() >= suffix.size() && + name.compare(name.size() - suffix.size(), suffix.size(), suffix) == 0; } - inline bool has_suffix (const std::string&name, const std::initializer_list &suffix_list) + inline bool has_suffix (const std::string &name, const std::initializer_list &suffix_list) { - bool flag(false); + return std::any_of (suffix_list.begin(), + suffix_list.end(), + [&] (const std::string& suffix) { return has_suffix (name, suffix); }); + } - for(const auto& suffix : suffix_list) { flag = flag || has_suffix (name, suffix); } + inline bool has_suffix (const std::string &name, const vector &suffix_list) + { + return std::any_of (suffix_list.begin(), + suffix_list.end(), + [&] (const std::string& suffix) { return has_suffix (name, suffix); }); + } - return flag; + inline bool is_mrtrix_image (const std::string& name) + { + return strcmp(name.c_str(), std::string("-").c_str()) == 0 || + Path::has_suffix (name, {".mif", ".mih", ".mif.gz"}); } inline std::string cwd () @@ -153,7 +192,7 @@ namespace MR struct dirent* entry = readdir (p); if (entry) { ret = entry->d_name; - if (ret == "." || ret == "..") + if (ret == "." || ret == "..") ret = read_name(); } return ret; @@ -170,6 +209,19 @@ namespace MR DIR* p; }; + + + inline char delimiter (const std::string& filename) + { + if (Path::has_suffix (filename, ".tsv")) + return '\t'; + else if (Path::has_suffix (filename, ".csv")) + return ','; + else + return ' '; + } + + } } diff --git a/core/file/png.cpp b/core/file/png.cpp new file mode 100644 index 0000000000..2a951f78c5 --- /dev/null +++ b/core/file/png.cpp @@ -0,0 +1,363 @@ +/* Copyright (c) 2008-2019 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. + * + * For more details, see http://www.mrtrix.org/. 
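The suffix matching above reduces to a single std::any_of() call over the candidate list, and the new Path::delimiter() helper keys the field separator off the file extension. A self-contained sketch of both (these free functions mirror, but are not, the Path:: implementations):

#include <algorithm>
#include <initializer_list>
#include <iostream>
#include <string>

bool has_suffix (const std::string& name, const std::string& suffix) {
  return name.size() >= suffix.size() &&
         name.compare (name.size() - suffix.size(), suffix.size(), suffix) == 0;
}

bool has_suffix (const std::string& name, std::initializer_list<std::string> suffixes) {
  return std::any_of (suffixes.begin(), suffixes.end(),
                      [&] (const std::string& s) { return has_suffix (name, s); });
}

char delimiter (const std::string& filename) {
  if (has_suffix (filename, ".tsv")) return '\t';
  if (has_suffix (filename, ".csv")) return ',';
  return ' ';
}

int main () {
  std::cout << has_suffix ("image.mif.gz", { ".mif", ".mih", ".mif.gz" }) << "\n";  // 1
  std::cout << int (delimiter ("tracks.tsv")) << "\n";                              // 9 (tab)
}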
+ */ + +#ifdef MRTRIX_PNG_SUPPORT + +#include "file/png.h" + +#include +#include + +#include "app.h" +#include "datatype.h" +#include "exception.h" +#include "mrtrix.h" +#include "file/path.h" + + +namespace MR +{ + namespace File + { + namespace PNG + { + + + + Reader::Reader (const std::string& filename) : + png_ptr (NULL), + info_ptr (NULL), + width (0), + height (0), + bit_depth (0), + color_type (0), + channels (0) + { + FILE* infile = fopen (filename.c_str(), "rb"); + unsigned char sig[8]; + if (fread (sig, 1, 8, infile) < 8) + throw Exception ("error reading from PNG file \"" + filename + "\""); + const int sigcmp = png_sig_cmp(sig, 0, 8); + if (sigcmp) { + fclose (infile); + std::stringstream s; + for (size_t i = 0; i != 8; ++i) { + s << str(int(sig[i])) << " "; + } + Exception e ("Bad PNG signature in file \"" + filename + "\""); + e.push_back ("File signature: " + s.str()); + throw e; + } + if (!(png_ptr = png_create_read_struct (PNG_LIBPNG_VER_STRING, NULL, NULL, NULL))) { + fclose (infile); + throw Exception ("Unable to allocate memory for PNG read structure for image \"" + filename + "\""); + } + if (!(info_ptr = png_create_info_struct (png_ptr))) { + fclose (infile); + png_destroy_read_struct (&png_ptr, NULL, NULL); + throw Exception ("Unable to allocate memory for PNG info structure for image \"" + filename + "\""); + } + if (setjmp(png_jmpbuf(png_ptr))) { + fclose (infile); + png_destroy_read_struct (&png_ptr, &info_ptr, NULL); + throw Exception ("Fatal error reading PNG image \"" + filename + "\""); + } + png_init_io (png_ptr, infile); + png_set_sig_bytes (png_ptr, 8); + png_read_info (png_ptr, info_ptr); + png_get_IHDR (png_ptr, info_ptr, &width, &height, &bit_depth, &color_type, NULL, NULL, NULL); + + switch (color_type) { + case PNG_COLOR_TYPE_GRAY: + case PNG_COLOR_TYPE_GRAY_ALPHA: + case PNG_COLOR_TYPE_PALETTE: + case PNG_COLOR_TYPE_RGB: + case PNG_COLOR_TYPE_RGB_ALPHA: + break; + default: + throw Exception ("Invalid color type (" + str(color_type) + ") in PNG file \"" + filename + "\""); + } + switch (bit_depth) { + case 1: + case 2: + case 4: + case 8: + case 16: + break; + default: + throw Exception ("Invalid bit depth (" + str(bit_depth) + ") in PNG file \"" + filename + "\""); + } + + output_bitdepth = bit_depth; + + if ((color_type == PNG_COLOR_TYPE_PALETTE) || + (color_type == PNG_COLOR_TYPE_GRAY && bit_depth < 8 && !(bit_depth == 1 && !(width%8))) || + (png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS))) { + set_expand(); + } + png_set_interlace_handling (png_ptr); + png_read_update_info (png_ptr, info_ptr); + channels = png_get_channels (png_ptr, info_ptr); + DEBUG ("PNG image \"" + filename + "\": " + + str(width) + "x" + str(height) + "; " + + "bitdepth = " + str(bit_depth) + "; " + + "colortype: " + str(color_type) + "; " + + "channels = " + str(channels) + "; " + + "bytes per row = " + str(png_get_rowbytes(png_ptr, info_ptr)) + "; " + + "output bitdepth = " + str(output_bitdepth) + "; " + + "total bytes = " + str(get_size())); + } + + + + Reader::~Reader() + { + if (png_ptr && info_ptr) { + png_destroy_read_struct (&png_ptr, &info_ptr, NULL); + png_ptr = NULL; + info_ptr = NULL; + } + } + + + + void Reader::set_expand() + { + png_set_expand (png_ptr); + output_bitdepth = std::max (8, bit_depth); + } + + + + void Reader::load (uint8_t* image_data) + { + if (setjmp(png_jmpbuf(png_ptr))) { + png_destroy_read_struct (&png_ptr, &info_ptr, NULL); + throw Exception ("Fatal error reading PNG image"); + } + const int row_bytes = png_get_rowbytes (png_ptr, info_ptr);
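// The row pointers set up below all point into the caller's single contiguous
// buffer, spaced row_bytes apart, so png_read_image() fills image_data in place
// without any per-row allocation.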
+ png_bytepp row_pointers = new png_bytep[height]; + for (png_uint_32 i = 0; i != height; ++i) + row_pointers[i] = image_data + i*row_bytes; + png_read_image (png_ptr, row_pointers); + delete[] row_pointers; + row_pointers = NULL; + } + + + + + + + + + + + + + + + + + jmp_buf Writer::jmpbuf; + + + + Writer::Writer (const Header& H, const std::string& filename) : + png_ptr (NULL), + info_ptr (NULL), + color_type (0), + bit_depth (0), + filename (filename), + data_type (H.datatype()), + outfile (NULL) + { + if (Path::exists (filename) && !App::overwrite_files) + throw Exception ("output file \"" + filename + "\" already exists (use -force option to force overwrite)"); + if (!(png_ptr = png_create_write_struct (PNG_LIBPNG_VER_STRING, this, &error_handler, NULL))) + throw Exception ("Unable to create PNG write structure for image \"" + filename + "\""); + if (!(info_ptr = png_create_info_struct (png_ptr))) + throw Exception ("Unable to create PNG info structure for image \"" + filename + "\""); + if (setjmp (jmpbuf)) { + png_destroy_write_struct (&png_ptr, &info_ptr); + throw Exception ("Unable to set jump buffer for PNG structure for image \"" + filename + "\""); + } + outfile = fopen (filename.c_str(), "wb"); + png_init_io (png_ptr, outfile); + png_set_compression_level (png_ptr, Z_DEFAULT_COMPRESSION); + switch (H.ndim()) { + case 2: + case 3: + color_type = PNG_COLOR_TYPE_GRAY; + break; + case 4: + switch (H.size(3)) { + case 1: color_type = PNG_COLOR_TYPE_GRAY; break; + case 2: color_type = PNG_COLOR_TYPE_GRAY_ALPHA; break; + case 3: color_type = PNG_COLOR_TYPE_RGB; break; + case 4: color_type = PNG_COLOR_TYPE_RGB_ALPHA; break; + default: + png_destroy_write_struct (&png_ptr, &info_ptr); + throw Exception ("Unsupported number of volumes (" + str(H.size(3)) + ") in image \"" + filename + "\" for PNG writer"); + } + break; + default: + png_destroy_write_struct (&png_ptr, &info_ptr); + throw Exception ("Unsupported image dimensionality (" + str(H.ndim()) + ") in image \"" + filename + "\" for PNG writer"); + } + if (data_type.is_complex()) { + png_destroy_write_struct (&png_ptr, &info_ptr); + throw Exception ("Complex datatype from image \"" + H.name() + "\" not supported by PNG format"); + } + if (data_type.is_floating_point()) { + INFO ("Data to be converted to PNG is floating-point; " + "image will be scaled to integer representation assuming input data is in the range 0.0 - 1.0"); + } + switch (data_type() & DataType::Type) { + case DataType::Undefined: + png_destroy_write_struct (&png_ptr, &info_ptr); + throw Exception ("Undefined data type in image \"" + H.name() + "\" for PNG writer"); + case DataType::Bit: + bit_depth = 1; + break; + case DataType::UInt8: + case DataType::Float32: + bit_depth = 8; + break; + case DataType::UInt16: + case DataType::UInt32: + case DataType::UInt64: + case DataType::Float64: + bit_depth = 16; + break; + } + // Detect cases where one axis has a size of 1, and hence represents the image plane + // being set via selection of a single slice rather than axis permutation + // Note that H may be 3D or 4D, all with non-unity size, in which case + // axis 2 forms the image plane + size_t width_axis = 0, height_axis = 1; + if (H.ndim() > 2 && H.size(2) != 1) { + if (H.size(0) == 1 && H.size(1) != 1) { + width_axis = 1; + height_axis = 2; + } else if (H.size(1) == 1 && H.size(0) != 1) { + width_axis = 0; + height_axis = 2; + } + } + width = H.size (width_axis); + height = H.size (height_axis); + png_set_IHDR (png_ptr, info_ptr, width, height, bit_depth, 
color_type, PNG_INTERLACE_NONE, + PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT); + png_set_gAMA (png_ptr, info_ptr, 1.0); + png_time modtime; + png_convert_from_time_t (&modtime, std::chrono::system_clock::to_time_t (std::chrono::system_clock::now())); + png_set_tIME (png_ptr, info_ptr, &modtime); + png_text text[4]; + const std::string title_key ("Title"); + const std::string title_text (filename); + const std::string software_key ("Software"); + const std::string software_text ("MRtrix3"); + const std::string source_key ("Source"); + const std::string source_text (MR::App::mrtrix_version); + const std::string url_key ("URL"); + const std::string url_text ("www.mrtrix.org"); + text[0].compression = PNG_TEXT_COMPRESSION_NONE; + text[0].key = const_cast (title_key.c_str()); + text[0].text = const_cast (title_text.c_str()); + text[1].compression = PNG_TEXT_COMPRESSION_NONE; + text[1].key = const_cast (software_key.c_str()); + text[1].text = const_cast (software_text.c_str()); + text[2].compression = PNG_TEXT_COMPRESSION_NONE; + text[2].key = const_cast (source_key.c_str()); + text[2].text = const_cast (source_text.c_str()); + text[3].compression = PNG_TEXT_COMPRESSION_NONE; + text[3].key = const_cast (url_key.c_str()); + text[3].text = const_cast (url_text.c_str()); + png_set_text (png_ptr, info_ptr, text, 4); + png_write_info (png_ptr, info_ptr); + // Note: png_set_packing NOT being called; this will result in 8 pixels per byte for mask images + // (if this function _were_ called, it would instead be spread over 1 byte per pixel) + } + + + + Writer::~Writer() + { + if (png_ptr && info_ptr) { + png_destroy_write_struct (&png_ptr, &info_ptr); + png_ptr = NULL; + info_ptr = NULL; + } + } + + + + void Writer::save (uint8_t* data) + { + if (setjmp (jmpbuf)) { + png_destroy_write_struct (&png_ptr, &info_ptr); + png_ptr = NULL; + info_ptr = NULL; + throw Exception ("Unable to set jump buffer for PNG structure for image \"" + filename + "\""); + } + const size_t row_bytes = png_get_rowbytes (png_ptr, info_ptr); + + auto finish = [&] (uint8_t* to_write) + { + png_bytepp row_pointers = new png_bytep[height]; + for (size_t row = 0; row != height; ++row) + row_pointers[row] = to_write + row * row_bytes; + png_write_image (png_ptr, row_pointers); + png_write_end (png_ptr, info_ptr); + }; + + + if (bit_depth == 1 || data_type == DataType::UInt8 || data_type == DataType::UInt16BE) { + finish (data); + } else { + uint8_t scratch[row_bytes * height]; + // Convert from "data" into "scratch" + // This may include changes to fundamental type, changes to bit depth, changes to endianness + uint8_t* in_ptr = data, *out_ptr = scratch; + const uint8_t channels = png_get_channels (png_ptr, info_ptr); + const size_t num_elements = channels * width * height; + switch (bit_depth) { + case 8: fill (in_ptr, out_ptr, data_type, num_elements); break; + case 16: fill (in_ptr, out_ptr, data_type, num_elements); break; + default: assert (0); + } + finish (scratch); + } + } + + + + void Writer::error_handler (png_struct_def* data, const char* msg) + { + Exception e ("Encountered critical error during PNG write: "); + e.push_back (msg); + throw e; + } + + + + } + } +} + +#endif + diff --git a/core/file/png.h b/core/file/png.h new file mode 100644 index 0000000000..1f69ad1f50 --- /dev/null +++ b/core/file/png.h @@ -0,0 +1,125 @@ +/* Copyright (c) 2008-2019 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. + * + * For more details, see http://www.mrtrix.org/. + */ + +#ifndef __file_png_h__ +#define __file_png_h__ + +#ifdef MRTRIX_PNG_SUPPORT + +#include + +#include "header.h" +#include "raw.h" +#include "types.h" +#include "image_io/fetch_store.h" + +namespace MR +{ + namespace File + { + namespace PNG + { + + class Reader + { MEMALIGN(Reader) + public: + Reader (const std::string& filename); + ~Reader(); + + uint32_t get_width() const { return width; } + uint32_t get_height() const { return height; } + int get_bitdepth() const { return bit_depth; } + int get_colortype() const { return color_type; } + int get_channels() const { return channels; } + + size_t get_size() const { return png_get_rowbytes(png_ptr, info_ptr) * height; } + int get_output_bitdepth() const { return output_bitdepth; } + bool has_transparency() const { return png_get_valid (png_ptr, info_ptr, PNG_INFO_tRNS); } + + void set_expand(); + + void load (uint8_t*); + + private: + png_structp png_ptr; + png_infop info_ptr; + png_uint_32 width, height; + int bit_depth, color_type; + uint8_t channels; + + int output_bitdepth; + + }; + + + + class Writer + { MEMALIGN(Writer) + public: + Writer (const Header&, const std::string&); + ~Writer(); + + size_t get_size() const { return png_get_rowbytes(png_ptr, info_ptr) * height;} + + void save (uint8_t*); + + private: + png_structp png_ptr; + png_infop info_ptr; + uint32_t width, height; + int color_type, bit_depth; + std::string filename; + DataType data_type; + FILE* outfile; + + static void error_handler (png_struct_def*, const char*); + static jmp_buf jmpbuf; + + template + void fill (uint8_t* in_ptr, uint8_t* out_ptr, const DataType data_type, const size_t num_elements); + + }; + + + template + void Writer::fill (uint8_t* in_ptr, uint8_t* out_ptr, const DataType data_type, const size_t num_elements) + { + std::function fetch_func; + std::function store_func; + __set_fetch_store_functions (fetch_func, store_func, data_type); + default_type multiplier = 1.0; + switch (data_type() & DataType::Type) { + case DataType::Float32: multiplier = std::numeric_limits::max(); break; + case DataType::Float64: multiplier = std::numeric_limits::max(); break; + } + for (size_t i = 0; i != num_elements; ++i) { + Raw::store_BE (std::min (default_type(std::numeric_limits::max()), std::max (0.0, std::round(multiplier * fetch_func (in_ptr, 0, 0.0, 1.0)))), out_ptr); + in_ptr += data_type.bytes(); + out_ptr += sizeof(T); + } + }; + + + + } + } +} + +#endif +#endif + + + diff --git a/core/file/tiff.cpp b/core/file/tiff.cpp index 35ad69c5e8..8daa7f2970 100644 --- a/core/file/tiff.cpp +++ b/core/file/tiff.cpp @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
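Writer::fill() above converts the input datatype to the PNG sample layout: floating-point values are assumed to lie in the range 0.0-1.0, scaled to the full range of the target integer type, clamped, rounded and written most-significant byte first. A stand-alone sketch of that scaling step (store_scaled_be() is an illustrative reimplementation, not the library's Raw::store_BE()):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>

template <typename T>
void store_scaled_be (double value, uint8_t* out)
{
  const double max = double (std::numeric_limits<T>::max());
  const double scaled = std::min (max, std::max (0.0, std::round (value * max)));
  const T sample = T (scaled);
  // PNG samples are stored big-endian:
  for (int byte = int (sizeof (T)) - 1; byte >= 0; --byte)
    *out++ = uint8_t ((sample >> (8 * byte)) & 0xFF);
}

int main ()
{
  uint8_t buf[2];
  store_scaled_be<uint16_t> (0.5, buf);
  std::cout << int (buf[0]) << " " << int (buf[1]) << "\n";   // 128 0   (0x8000)
  store_scaled_be<uint16_t> (1.0, buf);
  std::cout << int (buf[0]) << " " << int (buf[1]) << "\n";   // 255 255 (0xFFFF)
}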
* - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifdef MRTRIX_TIFF_SUPPORT #include "file/tiff.h" diff --git a/core/file/tiff.h b/core/file/tiff.h index b36ff23574..d480a46721 100644 --- a/core/file/tiff.h +++ b/core/file/tiff.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_tiff_h__ #define __file_tiff_h__ diff --git a/core/file/utils.h b/core/file/utils.h index 6cc94615db..4296accc77 100644 --- a/core/file/utils.h +++ b/core/file/utils.h @@ -1,18 +1,19 @@ -/* - * Copyright (c) 2008-2018 the MRtrix3 contributors. +/* Copyright (c) 2008-2019 the MRtrix3 contributors. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/ + * file, You can obtain one at http://mozilla.org/MPL/2.0/. * - * MRtrix3 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. * - * For more details, see http://www.mrtrix.org/ + * For more details, see http://www.mrtrix.org/. */ - #ifndef __file_ops_h__ #define __file_ops_h__ @@ -53,17 +54,42 @@ namespace MR //CONF /tmp/, which is typically a RAM file system and should therefore //CONF be fast; but may cause issues on machines with little RAM //CONF capacity or where write-access to this location is not permitted. 
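The TmpFileDir entry documented above interacts with two environment variables in the code that follows: MRTRIX_TMPFILE_DIR always takes precedence, the config entry is consulted next, and TMPDIR only replaces the built-in platform default used when neither is set. A sketch of that resolution order (resolve_tmpfile_dir() is illustrative; the real __get_tmpfile_dir() reads the config entry through File::Config::get()):

#include <cstdlib>
#include <iostream>
#include <string>

// Precedence: MRTRIX_TMPFILE_DIR > TmpFileDir config entry > TMPDIR > platform default
std::string resolve_tmpfile_dir (const std::string& config_value)   // pass "" when the entry is unset
{
  if (const char* dir = std::getenv ("MRTRIX_TMPFILE_DIR"))
    return dir;
  std::string fallback = "/tmp";   // "." on MRTRIX_WINDOWS builds
  if (const char* tmpdir = std::getenv ("TMPDIR"))
    fallback = tmpdir;
  return config_value.empty() ? fallback : config_value;
}

int main ()
{
  std::cout << resolve_tmpfile_dir ("") << "\n";   // typically /tmp unless overridden
}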
- //CONF Note that this setting does not influence the location in which - //CONF Python scripts construct their temporary directories; that is - //CONF determined based on config file option :option:`ScriptTmpDir`. - const std::string& tmpfile_dir () { - static const std::string __tmpfile_dir = File::Config::get ("TmpFileDir", + //CONF + //CONF Note that this location can also be manipulated using the + //CONF :envvar:`MRTRIX_TMPFILE_DIR` environment variable, without editing the + //CONF config file. Note also that this setting does not influence the + //CONF location in which Python scripts construct their scratch + //CONF directories; that is determined based on config file option + //CONF ScriptScratchDir. + + //ENVVAR name: MRTRIX_TMPFILE_DIR + //ENVVAR This has the same effect as the :option:`TmpFileDir` + //ENVVAR configuration file entry, and can be used to set the location of + //ENVVAR temporary files (as used in Unix pipes) for a single session, + //ENVVAR within a single script, or for a single command without + //ENVVAR modifying the configuration file. + const std::string __get_tmpfile_dir () { + const char* from_env_mrtrix = getenv ("MRTRIX_TMPFILE_DIR"); + if (from_env_mrtrix) + return from_env_mrtrix; + + const char* default_tmpdir = #ifdef MRTRIX_WINDOWS "." #else "/tmp" #endif - ); + ; + + const char* from_env_general = getenv ("TMPDIR"); + if (from_env_general) + default_tmpdir = from_env_general; + + return File::Config::get ("TmpFileDir", default_tmpdir); + } + + const std::string& tmpfile_dir () { + static const std::string __tmpfile_dir = __get_tmpfile_dir(); return __tmpfile_dir; } @@ -75,6 +101,13 @@ namespace MR //CONF suffix (depending on file type). Note that this prefix can also be //CONF manipulated using the `MRTRIX_TMPFILE_PREFIX` environment //CONF variable, without editing the config file. + + //ENVVAR name: MRTRIX_TMPFILE_PREFIX + //ENVVAR This has the same effect as the :option:`TmpFilePrefix` + //ENVVAR configuration file entry, and can be used to set the prefix for + //ENVVAR the name of temporary files (as used in Unix pipes) for a + //ENVVAR single session, within a single script, or for a single command + //ENVVAR without modifying the configuration file. const std::string __get_tmpfile_prefix () { const char* from_env = getenv ("MRTRIX_TMPFILE_PREFIX"); if (from_env) return from_env; @@ -86,25 +119,26 @@ namespace MR return __tmpfile_prefix; } + /* Config file options listed here so that they can be scraped by * generate_user_docs.sh and added to the list of config file options in * the documentation without modifying the script to read from the scripts * directory. */ - //CONF option: ScriptTmpDir + //CONF option: ScriptScratchDir //CONF default: `.` - //CONF The location in which to generate the temporary directories to be + //CONF The location in which to generate the scratch directories to be //CONF used by MRtrix Python scripts. By default they will be generated //CONF in the working directory. //CONF Note that this setting does not influence the location in which //CONF piped images and other temporary files are created by MRtrix3; //CONF that is determined based on config file option :option:`TmpFileDir`. - //CONF option: ScriptTmpPrefix + //CONF option: ScriptScratchPrefix //CONF default: `